Dataset schema (one row per source file; ⌀ marks nullable fields):
hexsha: string (length 40) | size: int64 (3 to 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (3 to 972) | max_stars_repo_name: string (6 to 130) | max_stars_repo_head_hexsha: string (40 to 78) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (24) ⌀ | max_stars_repo_stars_event_max_datetime: string (24) ⌀
max_issues_repo_path: string (3 to 972) | max_issues_repo_name: string (6 to 130) | max_issues_repo_head_hexsha: string (40 to 78) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 116k) ⌀ | max_issues_repo_issues_event_min_datetime: string (24) ⌀ | max_issues_repo_issues_event_max_datetime: string (24) ⌀
max_forks_repo_path: string (3 to 972) | max_forks_repo_name: string (6 to 130) | max_forks_repo_head_hexsha: string (40 to 78) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (24) ⌀ | max_forks_repo_forks_event_max_datetime: string (24) ⌀
content: string (3 to 1.03M) | avg_line_length: float64 (1.13 to 941k) | max_line_length: int64 (2 to 941k) | alphanum_fraction: float64 (0 to 1)

hexsha: 8f3bfa1ad5c9aa5cbdf251e2f38f6b9d1a3bf58f | size: 13,677 | ext: py | lang: Python
max_stars:  chess/ChessUtil.py | vinoo999/alpha-zero-general @ 01bd6ac40d7b1fed97a84e37f7a549be8d50f668 | licenses: ["MIT"] | stars: 2 (2018-04-01T05:08:44.000Z to 2018-04-20T01:58:46.000Z)
max_issues: chess/ChessUtil.py | vinoo999/alpha-zero-general @ 01bd6ac40d7b1fed97a84e37f7a549be8d50f668 | licenses: ["MIT"] | issues: null
max_forks:  chess/ChessUtil.py | vinoo999/alpha-zero-general @ 01bd6ac40d7b1fed97a84e37f7a549be8d50f668 | licenses: ["MIT"] | forks: null
content:
import re
import copy
from .ChessConstants import *
import sys
# /*****************************************************************************
# * UTILITY FUNCTIONS
# ****************************************************************************/
def generate_fen(chess):
empty = 0
fen = ''
i = SQUARES['a8']
while i < SQUARES['h1'] + 1:
# for i in range(SQUARES['a8'], SQUARES['h1']+1):
if (chess.board[i] == None):
empty+=1
else:
if (empty > 0):
fen += str(empty)
empty = 0
color = chess.board[i]['color']
piece = chess.board[i]['type']
fen += piece.upper() if (color == WHITE) else piece.lower()
if ((i + 1) & 0x88):
if (empty > 0):
fen += str(empty)
if (i != SQUARES['h1']):
# print("ADDING A /: {} {}".format(i, fen))
fen += '/'
empty = 0
i += 8
i+=1
cflags = ''
if (chess.castling[WHITE] & BITS['KSIDE_CASTLE']):
cflags += 'K'
if (chess.castling[WHITE] & BITS['QSIDE_CASTLE']):
cflags += 'Q'
if (chess.castling[BLACK] & BITS['KSIDE_CASTLE']):
cflags += 'k'
if (chess.castling[BLACK] & BITS['QSIDE_CASTLE']):
cflags += 'q'
# /* do we have an empty castling flag? */
cflags = cflags or '-'
chess.epflags = '-' if (chess.ep_square == EMPTY) else algebraic(chess.ep_square)
out_arr = [fen, chess.turn, cflags, chess.epflags, chess.half_moves, chess.move_number]
out_arr = map(str, out_arr)
return ' '.join(out_arr)
def validate_fen(fen):
errors = {
0: 'No errors.',
1: 'FEN string must contain six space-delimited fields.',
2: '6th field (move number) must be a positive integer.',
3: '5th field (half move counter) must be a non-negative integer.',
4: '4th field (en-passant square) is invalid.',
5: '3rd field (castling availability) is invalid.',
6: '2nd field (side to move) is invalid.',
7: '1st field (piece positions) does not contain 8 \'/\'-delimited rows.',
8: '1st field (piece positions) is invalid [consecutive numbers].',
9: '1st field (piece positions) is invalid [invalid piece].',
10: '1st field (piece positions) is invalid [row too large].',
11: 'Illegal en-passant square',
}
    # 1st criterion: 6 space-separated fields? */
    tokens = re.split(r'\s+', fen)
if (len(tokens) != 6):
return {'valid': False, 'error_number': 1, 'error': errors[1]}
# 2nd criterion: move number field is a integer value > 0? */
if (not tokens[5].isdigit() or int(tokens[5]) <= 0):
return {'valid': False, 'error_number': 2, 'error' : errors[2]}
# 3rd criterion: half move counter is an integer >= 0? */
if (not tokens[4].isdigit() or int(tokens[4]) < 0):
return {'valid': False, 'error_number': 3, 'error': errors[3]}
# 4th criterion: 4th field is a valid e.p.-string? */
if (not re.search('^(-|[abcdefgh][36])$', tokens[3])):
return {'valid': False, 'error_number': 4, 'error': errors[4]}
# 5th criterion: 3th field is a valid castle-string? */
if( not re.search('^(KQ?k?q?|Qk?q?|kq?|q|-)$',tokens[2])) :
return {'valid': False, 'error_number': 5, 'error': errors[5]}
# 6th criterion: 2nd field is "w" (white) or "b" (black)? */
if (not re.search('^(w|b)$', tokens[1])) :
return {'valid': False, 'error_number': 6, 'error': errors[6]}
# 7th criterion: 1st field contains 8 rows? */
rows = tokens[0].split('/')
if (len(rows) != 8):
return {'valid': False, 'error_number': 7, 'error': errors[7]}
# 8th criterion: every row is valid? */
for i in range(len(rows)):
# check for right sum of fields AND not two numbers in succession */
sum_fields = 0
previous_was_number = False
for k in range(len(rows[i])):
if (rows[i][k].isdigit()):
if (previous_was_number):
return {'valid': False, 'error_number': 8, 'error': errors[8]}
sum_fields += int(rows[i][k])
previous_was_number = True
else:
if (not re.search('^[prnbqkPRNBQK]$',rows[i][k])):
return {'valid': False, 'error_number': 9, 'error': errors[9]}
sum_fields += 1
previous_was_number = False
if (sum_fields != 8):
return {'valid': False, 'error_number': 10, 'error': errors[10]}
if (tokens[3] != '-' and ((tokens[3][1] == '3' and tokens[1] == 'w') or \
(tokens[3][1] == '6' and tokens[1] == 'b'))):
return {'valid': False, 'error_number': 11, 'error': errors[11]}
# everything's okay! */
return {'valid': True, 'error_number': 0, 'error': errors[0]}
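def _example_validate_fen():
    """Hedged usage sketch (added for illustration, not part of the original
    module): validate_fen() is self-contained, so it can be exercised directly.
    The standard starting position satisfies every criterion above."""
    start_fen = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
    result = validate_fen(start_fen)
    assert result['valid'] and result['error_number'] == 0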
def move_to_san(chess, move, sloppy=None) :
'''/* convert a move from 0x88 coordinates to Standard Algebraic Notation
* (SAN)
*
* @param {boolean} sloppy Use the sloppy SAN generator to work around over
* disambiguation bugs in Fritz and Chessbase. See below:
*
* r1bqkbnr/ppp2ppp/2n5/1B1pP3/4P3/8/PPPP2PP/RNBQK1NR b KQkq - 2 4
* 4. ... Nge7 is overly disambiguated because the knight on c6 is pinned
* 4. ... Ne7 is technically the valid SAN
*/'''
output = ''
# print("MOVE:", move)
# print("FLAGS:", move['flags'])
# print("FROM: {} TO: {}".format(move['from'], move['to']))
if (move['flags'] & BITS['KSIDE_CASTLE']):
output = 'O-O'
elif (move['flags'] & BITS['QSIDE_CASTLE']):
output = 'O-O-O'
else:
disambiguator = chess.get_disambiguator(move, sloppy)
if (move['piece'] != PAWN):
# print("PIECE: ",move['piece'])
# print("DISAMBIG: ", disambiguator)
output += move['piece'].upper() + disambiguator
if (move['flags'] & (BITS['CAPTURE'] | BITS['EP_CAPTURE'])):
if (move['piece'] == PAWN):
output += algebraic(move['from'])[0]
output += 'x'
output += algebraic(move['to'])
if (move['flags'] & BITS['PROMOTION']):
output += '=' + move['promotion'].upper()
chess.make_move(move)
if (chess.in_check()):
if (chess.in_checkmate()):
output += '#'
else:
output += '+'
chess.undo_move()
return output
def ascii(chess):
board = chess.board
s = ' +------------------------+\n'
i = SQUARES['a8']
while i < SQUARES['h1'] + 1:
# for i in range(SQUARES['a8'], SQUARES['h1']+1):
# /* display the rank */
if (col_file(i) == 0):
s += ' ' + '87654321'[rank(i)] + ' |'
# /* empty piece */
if (board[i] == None):
s += ' . '
else:
piece = board[i]['type']
color = board[i]['color']
symbol = piece.upper() if (color == WHITE) else piece.lower()
s += ' ' + symbol + ' '
if ((i + 1) & 0x88):
s += '|\n'
i += 8
i += 1
s += ' +------------------------+\n'
s += ' a b c d e f g h\n'
return s
# // parses all of the decorators out of a SAN string
def stripped_san(move):
    return re.sub('[+#]?[?!]*$', '', move.replace('=', ''))
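def _example_stripped_san():
    """Hedged usage sketch (added for illustration): decorators such as '=',
    '+', '#', '!' and '?' are stripped so SAN strings can be compared on
    their bare move text."""
    assert stripped_san('e8=Q+!') == 'e8Q'
    assert stripped_san('Nf3') == 'Nf3'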
def rank(i):
return i >> 4
def col_file(i):
return i & 15
def algebraic(i):
f = col_file(i)
r = rank(i)
files = 'abcdefgh'
ranks = '87654321'
return files[f] + ranks[r]
def mirror_num(i):
f = col_file(i)
r = rank(i)
files = 'abcdefgh'
ranks = '12345678'
pos = files[f] + ranks[r]
return SQUARES[pos]
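def _example_0x88_coordinates():
    """Hedged sketch (added for illustration): in the 0x88 board layout assumed
    by the helpers above, the high nibble of a square index is the rank counted
    from the 8th rank down and the low nibble is the file, so a8 is 0x00 and h1
    is 0x77; any index with a bit of 0x88 set is off the board."""
    assert algebraic(0x00) == 'a8'
    assert algebraic(0x77) == 'h1'
    assert ((0x07 + 1) & 0x88) != 0  # stepping right off the h-file leaves the board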
def swap_color(c):
return BLACK if c==WHITE else WHITE
def is_digit(c):
return c.isdigit()
# /* pretty = external move object */
def make_pretty(chess, ugly_move):
move = copy.deepcopy(ugly_move)
move['san'] = move_to_san(chess, move, False)
move['to'] = algebraic(move['to'])
move['from'] = algebraic(move['from'])
flags = ''
for flag in BITS:
if (BITS[flag] & move['flags']):
flags += FLAGS[flag]
move['flags'] = flags
return move
# def evaluate_board_old(mcts_board, b, player):
# board = mcts_board[0:8,:]
# #print("evaluate the board for player: " + str(player))
# val = 0
# for row in range(8):
# for col in range(8):
# piece_key = abs(board[row,col])
# print("--------------------------------------------")
# print(str('abcdefgh'[col]) +", "+ str('87654321'[row]))
# sign = -1 if board[row,col] < 0 else 1
# piece = MCTS_DECODER[piece_key]
# print("piece_key: " + str(piece_key)+", piece: " + str(piece))
# print("value here: " + str(get_piece_value(piece, row, col, sign)))
# print("--------------------------------------------")
# if(piece_key == 0):
# continue
# piece = MCTS_DECODER[piece_key]
# #print("piece: " + str(piece))
# sign = -1 if board[row,col] < 0 else 1
# #print("sign: " + str(sign))
# val += get_piece_value(piece, row, col, sign)
# #print("piece is: " +str(piece) + ". piece val: " + str(get_piece_value(piece,row,col, sign)))
# #print(str('abcdefgh'[col]) +", "+ str('87654321'[row])+ ": total val is = " + str(val))
# #input("continue?")
# print("")
# print("EVAL BOARD-------------------------------------")
# print("PLayer is: " + str(player))
# print("FINAL VAL: " + str(val))
# print()
# input("continue?")
# return val
def evaluate_board(mcts_board, player):
"""Calculate the total score for board state from a given player's point of view"""
board = mcts_board[0:8,:]
val = 0
for row in range(8):
for col in range(8):
piece_key = abs(board[row,col])
#print("--------------------------------------------")
#print(str('abcdefgh'[col]) +", "+ str('87654321'[row]))
#print("piece_key: " + str(piece_key))
#Ignore all positions on the board without a piece
if(piece_key == 0):
continue
#Grab the letter representation of the piece with it's key
piece = MCTS_DECODER[piece_key]
#Save the player that the piece belonged to
piece_color = -1 if board[row,col] < 0 else 1
#Get abs value of piece; mult by (piece_color * player) to determine if + or - for overall score
single_piece_val = get_piece_value(piece, row, col, piece_color) * piece_color * player
#Aggregate total score for the board
val += single_piece_val
return val
def get_piece_value(piece, i, j, piece_color):
"""Return absolute value of the pieces."""
eval_map = {
'p' : PAWN_EVAL,
'n' : KNIGHT_EVAL,
'b' : BISHOP_EVAL,
'r' : ROOK_EVAL,
'q' : QUEEN_EVAL,
'k' : KING_EVAL
}
eval_offset = {
'p' : 10,
'n' : 30,
'b' : 30,
'r' : 50,
'q' : 90,
'k' : 900
}
eval_matrix = eval_map[piece]
#If it's the 1st (pos) player, keep normal eval_board orientation
if piece_color > 0:
value = eval_offset[piece] + eval_matrix[i][j]
#It's the 2nd (neg) player, flip all eval boards besides the symmetric ones (q, n)
else:
if piece == 'q' or piece == 'n':
value = (eval_offset[piece] + eval_matrix[i][j])
else:
value = (eval_offset[piece] + eval_matrix[::-1][i][j])
return value
def trim(s):
    return re.sub(r'^\s+|\s+$', '', s)
##########################################
# DEBUGGING UTILITIES
########################################
def perft(chess, depth):
    # Note: this debugging helper assumes the Chess object exposes
    # generate_moves(), make_move(), undo_move(), king_attacked() and turn,
    # mirroring the methods used elsewhere in this module.
    moves = chess.generate_moves({'legal': False})
    nodes = 0
    color = chess.turn
    i = 0
    while i < len(moves):
        chess.make_move(moves[i])
        if not chess.king_attacked(color):
            if depth - 1 > 0:
                child_nodes = perft(chess, depth - 1)
                nodes += child_nodes
            else:
                nodes += 1
        chess.undo_move()
        i += 1
    return nodes
def decode_move(action):
if action == 64*64-1:
return "Switch Player"
elif action < 64*64+64*4+1 - 64*4 - 1:
tmp = action // 64
file1 = 'abcdefgh'[tmp//8]
rank1 = '87654321'[tmp%8]
pos1 = file1 + rank1
tmp2 = action % 64
file2 = 'abcdefgh'[tmp2//8]
rank2 = '87654321'[tmp2%8]
pos2 = file2 + rank2
move = {'from' : pos1, 'to' : pos2}
return move
else:
action_offset = action - 64*64
tmp = action_offset // 16
file1 = 'abcdefgh'[tmp//2]
rank1 = '72'[tmp%2]
rank2 = '81'[tmp%2]
tmp2 = action_offset % 16
promotion = tmp2//4 + 2
direction = tmp2%4
player = 1 if tmp%2 == 0 else -1
file2 = 'abcdefgh'[(tmp//2) - (direction-1)*player]
pos1 = file1 + rank1
pos2 = file2 + rank2
move = {'from' : pos1, 'to' : pos2, 'promotion' : MCTS_DECODER[promotion]}
return move
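def _example_decode_move():
    """Hedged sketch (added for illustration): under the action encoding assumed
    by decode_move(), a plain move is from_index * 64 + to_index, where each
    square index is file * 8 + row with files 'a'..'h' and rows counted from
    rank 8 down to rank 1.  The knight move b8-a6 is therefore
    (1 * 8 + 0) * 64 + (0 * 8 + 2) = 514, and action 64 * 64 - 1 is reserved
    for switching players."""
    assert decode_move(514) == {'from': 'b8', 'to': 'a6'}
    assert decode_move(64 * 64 - 1) == "Switch Player"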
avg_line_length: 29.476293 | max_line_length: 108 | alphanum_fraction: 0.494553

hexsha: b4613d6b2b7d1924107a458a6b5a4506cb1be85e | size: 1,200 | ext: py | lang: Python
max_stars:  src/10.py | vulpicastor/advent-of-code-2020 @ 06848cbe306e434f75e81af5793839e11baef273 | licenses: ["MIT"] | stars: null
max_issues: src/10.py | vulpicastor/advent-of-code-2020 @ 06848cbe306e434f75e81af5793839e11baef273 | licenses: ["MIT"] | issues: null
max_forks:  src/10.py | vulpicastor/advent-of-code-2020 @ 06848cbe306e434f75e81af5793839e11baef273 | licenses: ["MIT"] | forks: null
content:
#!/usr/bin/python3
import collections
import functools
import operator as op
import re
import secrets
import numpy as np
class Node(collections.UserDict):
def __init__(self):
self.data = dict()
self.key = secrets.token_bytes(8)
def __hash__(self):
return hash(self.key)
@functools.lru_cache(maxsize=None)
def dfs_visit(node, dest):
visited = 0
for n in node:
if n is dest:
visited += 1
continue
visited += dfs_visit(n, dest)
return visited
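def _example_dfs_visit():
    """Hedged sketch (added for illustration): dfs_visit() counts the distinct
    paths from `node` to `dest` by summing over outgoing edges.  With edges
    a->b, a->c and b->c there are exactly two paths from a to c."""
    a, b, c = Node(), Node(), Node()
    a[b] = 1
    a[c] = 1
    b[c] = 1
    assert dfs_visit(a, c) == 2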
def main():
with open("input/10.txt") as f:
lines = [int(l.strip()) for l in f.readlines()]
lines.append(0)
lines.sort()
lines.append(lines[-1] + 3)
jolts = np.array(lines)
g = collections.defaultdict(Node)
length = len(jolts)
for i, j in enumerate(jolts):
for k in range(1, 4):
if i + k < length and jolts[i+k] - j <= 3:
# g.adde(j, jolts[i+k])
g[j][g[jolts[i+k]]] = 1
else:
break
djolts = np.diff(jolts)
print(sum(djolts == 1) * (sum(djolts == 3)))
print(dfs_visit(g[0], dest=g[jolts[-1]]))
if __name__ == "__main__":
main()
avg_line_length: 21.052632 | max_line_length: 55 | alphanum_fraction: 0.560833

hexsha: 37530202d68f616d54f31bae38feec18a4812017 | size: 2,883 | ext: py | lang: Python
max_stars:  setup.py | preset-io/sqlalchemy-utils @ b5b48b13a22aa6d796e98c0225f367a2f010c788 | licenses: ["BSD-3-Clause"] | stars: null
max_issues: setup.py | preset-io/sqlalchemy-utils @ b5b48b13a22aa6d796e98c0225f367a2f010c788 | licenses: ["BSD-3-Clause"] | issues: null
max_forks:  setup.py | preset-io/sqlalchemy-utils @ b5b48b13a22aa6d796e98c0225f367a2f010c788 | licenses: ["BSD-3-Clause"] | forks: null
content:
"""
SQLAlchemy-Utils
----------------
Various utility functions and custom data types for SQLAlchemy.
"""
import os
import re
from setuptools import find_packages, setup
HERE = os.path.dirname(os.path.abspath(__file__))
def get_version():
filename = os.path.join(HERE, 'sqlalchemy_utils', '__init__.py')
with open(filename) as f:
contents = f.read()
pattern = r"^__version__ = '(.*?)'$"
return re.search(pattern, contents, re.MULTILINE).group(1)
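def _example_version_pattern():
    """Hedged sketch (added for illustration, not part of the original setup
    script): the MULTILINE pattern used by get_version() extracts the quoted
    version from a line such as "__version__ = '0.36.8'" (the version number
    here is only a placeholder)."""
    sample = "something = 1\n__version__ = '0.36.8'\n"
    assert re.search(r"^__version__ = '(.*?)'$", sample, re.MULTILINE).group(1) == '0.36.8'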
extras_require = {
'test': [
'pytest>=2.7.1',
'Pygments>=1.2',
'Jinja2>=2.3',
'docutils>=0.10',
'flexmock>=0.9.7',
'mock==2.0.0',
'psycopg2>=2.5.1',
'psycopg2cffi>=2.8.1',
'pg8000>=1.12.4',
'pytz>=2014.2',
'python-dateutil>=2.6',
'backports.zoneinfo;python_version<"3.9"',
'pymysql',
'flake8>=2.4.0',
'isort>=4.2.2',
'pyodbc',
],
'anyjson': ['anyjson>=0.3.3'],
'babel': ['Babel>=1.3'],
'arrow': ['arrow>=0.3.4'],
'pendulum': ['pendulum>=2.0.5'],
'intervals': ['intervals>=0.7.1'],
'phone': ['phonenumbers>=5.9.2'],
'password': ['passlib >= 1.6, < 2.0'],
'color': ['colour>=0.0.4'],
'timezone': ['python-dateutil'],
'url': ['furl >= 0.4.1'],
'encrypted': ['cryptography>=0.6']
}
# Add all optional dependencies to testing requirements.
test_all = []
for requirements in extras_require.values():
test_all += requirements
extras_require['test_all'] = sorted(test_all)
setup(
name='SQLAlchemy-Utils',
version=get_version(),
url='https://github.com/kvesteri/sqlalchemy-utils',
license='BSD',
author='Konsta Vesterinen, Ryan Leckey, Janne Vanhala, Vesa Uimonen',
author_email='konsta@fastmonkeys.com',
description=(
'Various utility functions for SQLAlchemy.'
),
long_description=__doc__,
packages=find_packages('.', exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'six',
'SQLAlchemy>=1.0'
],
extras_require=extras_require,
python_requires='~=3.4',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
avg_line_length: 28.544554 | max_line_length: 73 | alphanum_fraction: 0.582379

hexsha: e45c472152d1ba5d6b1bf046afa9dd72d606bc0f | size: 2,565 | ext: py | lang: Python
max_stars:  sdlf-utils/pipeline-examples/event-dataset-dependencies/sdlf-engineering-stageA/lambda/stage-a-check-job/src/lambda_function.py | bencaldwell/aws-serverless-data-lake-framework @ 64a1903b8e1c560b207bf9574cbcacdd1512ec8f | licenses: ["MIT-0"] | stars: null
max_issues: sdlf-utils/pipeline-examples/event-dataset-dependencies/sdlf-engineering-stageA/lambda/stage-a-check-job/src/lambda_function.py | bencaldwell/aws-serverless-data-lake-framework @ 64a1903b8e1c560b207bf9574cbcacdd1512ec8f | licenses: ["MIT-0"] | issues: null
max_forks:  sdlf-utils/pipeline-examples/event-dataset-dependencies/sdlf-engineering-stageA/lambda/stage-a-check-job/src/lambda_function.py | bencaldwell/aws-serverless-data-lake-framework @ 64a1903b8e1c560b207bf9574cbcacdd1512ec8f | licenses: ["MIT-0"] | forks: null
content:
from datalake_library.commons import init_logger
from datalake_library.transforms.transform_handler import TransformHandler
from datalake_library import octagon
from datalake_library.octagon import Artifact, EventReasonEnum, peh
logger = init_logger(__name__)
def lambda_handler(event, context):
"""Calls custom job waiter developed by user
Arguments:
event {dict} -- Dictionary with details on previous processing step
context {dict} -- Dictionary with details on Lambda context
Returns:
{dict} -- Dictionary with Processed Bucket, Key(s) and Job Details
"""
try:
logger.info('Fetching event data from previous step')
bucket = event['body']['bucket']
keys_to_process = event['body']['key']
team = event['body']['team']
stage = event['body']['pipeline_stage']
dataset = event['body']['dataset']
job_details = event['body']['job']['jobDetails']
processed_keys_path = event['body']['job']['processedKeysPath']
logger.info('Initializing Octagon client')
component = context.function_name.split('-')[-2].title()
octagon_client = (
octagon.OctagonClient()
.with_run_lambda(True)
.with_configuration_instance(event['body']['env'])
.build()
)
logger.info('Checking Job Status with user custom code')
transform_handler = TransformHandler().stage_transform(team, dataset, stage)
response = transform_handler().check_job_status(bucket, keys_to_process,
processed_keys_path, job_details) # custom user code called
response['peh_id'] = event['body']['peh_id']
if event['body']['job']['jobDetails']['jobStatus'] == 'FAILED':
peh.PipelineExecutionHistoryAPI(
octagon_client).retrieve_pipeline_execution(response['peh_id'])
octagon_client.end_pipeline_execution_failed(component=component,
issue_comment="{} {} Error: Check Job Logs".format(stage, component))
except Exception as e:
logger.error("Fatal error", exc_info=True)
peh.PipelineExecutionHistoryAPI(octagon_client).retrieve_pipeline_execution(
event['body']['peh_id'])
octagon_client.end_pipeline_execution_failed(component=component,
issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
raise e
return response
avg_line_length: 45 | max_line_length: 126 | alphanum_fraction: 0.632359

hexsha: 6a9d52966a0baa47166f3e4a1e1d05947837b9f1 | size: 15,319 | ext: py | lang: Python
max_stars:  baselines/logger.py | mk37972/SCAPE @ 01080e4159917546c76dd15ae5c74e092f4ae299 | licenses: ["MIT"] | stars: null
max_issues: baselines/logger.py | mk37972/SCAPE @ 01080e4159917546c76dd15ae5c74e092f4ae299 | licenses: ["MIT"] | issues: null
max_forks:  baselines/logger.py | mk37972/SCAPE @ 01080e4159917546c76dd15ae5c74e092f4ae299 | licenses: ["MIT"] | forks: null
content:
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, '__float__'):
valstr = '%-8.3g' % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[:maxlen-3] + '...' if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = 'wait_' + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
from baselines.common import mpi_util
d = mpi_util.mpi_weighted_mean(self.comm,
{name : (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()})
if self.comm.rank != 0:
d['dummy'] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
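def _example_logkv_mean():
    """Hedged sketch (added for illustration): logkv_mean() maintains a running
    average per key, so logging 1.0 and then 3.0 under the same key leaves 2.0
    in name2val.  A Logger with no output formats is enough to show this."""
    example_logger = Logger(dir=None, output_formats=[])
    example_logger.logkv_mean('x', 1.0)
    example_logger.logkv_mean('x', 3.0)
    assert example_logger.name2val['x'] == 2.0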
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']:
if varname in os.environ:
return int(os.environ[varname])
return 0
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
pass
# log('Logging to %s'%dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = -33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
avg_line_length: 30.394841 | max_line_length: 123 | alphanum_fraction: 0.548991

hexsha: 802f8ed5c84ce1fc12bfedf0a2d27f07f012e0f1 | size: 1,192 | ext: py | lang: Python
max_stars:  tests/bugs/core_0507_test.py | reevespaul/firebird-qa @ 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | licenses: ["MIT"] | stars: null
max_issues: tests/bugs/core_0507_test.py | reevespaul/firebird-qa @ 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | licenses: ["MIT"] | issues: null
max_forks:  tests/bugs/core_0507_test.py | reevespaul/firebird-qa @ 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | licenses: ["MIT"] | forks: null
content:
#coding:utf-8
#
# id: bugs.core_0507
# title: ambiguous statements return unpredictable results
# description:
#
# tracker_id: CORE-507
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set planonly;
select r.rdb$relation_name, rc.rdb$relation_name, rc.rdb$constraint_type
from rdb$relations r left join rdb$relation_constraints rc
on r.rdb$relation_name = rc.rdb$relation_name
order by rdb$relation_name;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stderr_1 = """
Statement failed, SQLSTATE = 42702
Dynamic SQL Error
-SQL error code = -204
-Ambiguous field name between a field and a field in the select list with name
-RDB$RELATION_NAME
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_expected_stderr == act_1.clean_stderr
avg_line_length: 24.833333 | max_line_length: 82 | alphanum_fraction: 0.694631

hexsha: 2ec952283dae6968a504e2273563c1718d83b3f3 | size: 15,276 | ext: py | lang: Python
max_stars:  schemas/lint.py | ryantd/determined @ b4f3be3c1878a9a7fdad4775647018753b39ef21 | licenses: ["Apache-2.0"] | stars: 1 (2021-03-29T13:39:45.000Z to 2021-03-29T13:39:45.000Z)
max_issues: schemas/lint.py | ZithaChitra/determined @ 1466d46dfd6abc56ad65d9904d4173ea62cff771 | licenses: ["Apache-2.0"] | issues: null
max_forks:  schemas/lint.py | ZithaChitra/determined @ 1466d46dfd6abc56ad65d9904d4173ea62cff771 | licenses: ["Apache-2.0"] | forks: null
content:
#!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
from typing import Callable, List, Optional, Tuple, TypeVar, Union
Schema = Union[dict, bool]
Errors = List[Tuple[str, str]]
linters = [] # type: List[Callable]
F = TypeVar("F", bound=Callable)
def register_linter(fn: F) -> F:
linters.append(fn)
return fn
# Only compound types supported are the nullable supported types.
COMPOUND_TYPES = {
    frozenset(("number", "null")): "number",
    frozenset(("integer", "null")): "integer",
frozenset(("object", "null")): "object",
frozenset(("array", "null")): "array",
frozenset(("string", "null")): "string",
frozenset(("boolean", "null")): "boolean",
# This only occurs for implicitly nested hyperparameters.
frozenset(("object", "array")): "null",
}
SUPPORTED_KEYWORDS_BY_TYPE = {
"number": {
"minimum",
"exclusiveMinimum",
"maximum",
"exclusiveMaximum",
"default",
"unionKey",
"checks",
},
"integer": {
"minimum",
"exclusiveMinimum",
"maximum",
"exclusiveMaximum",
"default",
"unionKey",
"checks",
},
"object": {
"additionalProperties",
"required",
"properties",
"$ref",
"default",
"unionKey",
"disallowProperties",
"eventuallyRequired",
"checks",
"compareProperties",
"allOf",
"optionalRef",
"$comment",
},
"array": {"items", "default", "unionKey", "minLength", "checks", "$comment"},
"string": {"pattern", "default", "unionKey", "checks", "$comment"},
"boolean": {"default", "unionKey", "checks", "$comment"},
"null": {"default", "unionKey", "checks", "$comment"},
}
TOPLEVEL_KEYWORDS = {"$schema", "$id", "title"}
class LintContext:
def __init__(
self, schema: Schema, path: str, toplevel: bool, in_checks: bool, filepath: str
) -> None:
self._schema = schema
self._path = path
self.toplevel = toplevel
self.in_checks = in_checks
self.filepath = filepath
@register_linter
def check_schema(schema: dict, path: str, ctx: LintContext) -> Errors:
if not ctx.toplevel:
return []
if "$schema" not in schema:
return [(path, "$schema is missing")]
exp_schema = "http://json-schema.org/draft-07/schema#"
if schema["$schema"] != exp_schema:
return [(path, f'$schema is not "{exp_schema}"')]
return []
@register_linter
def check_id(schema: dict, path: str, ctx: LintContext) -> Errors:
if not ctx.toplevel:
return []
if "$id" not in schema:
return [(path, "$id is missing")]
subpath = path + ".$id"
exp = "http://determined.ai/schemas/" + ctx.filepath
if schema["$id"] != exp:
return [(subpath, f"$id ({schema['$id']}) is not correct for filename")]
return []
def is_required(object_schema: dict, key: str) -> bool:
return key in object_schema.get("required", [])
def is_nullable(object_schema: dict, key: str) -> bool:
sub = object_schema["properties"][key]
if isinstance(sub, bool):
return sub
assert isinstance(sub, dict), f"expected dict but got {sub}"
if "const" in sub:
return False
if "enum" in sub:
return None in sub["enum"]
if "type" in sub:
if isinstance(sub["type"], list):
return "null" in sub["type"]
return bool(sub["type"] == "null")
return False
@register_linter
def check_default_typing(schema: dict, path: str, ctx: LintContext) -> Errors:
if ctx.in_checks:
return []
if not isinstance(schema, dict) or "properties" not in schema:
return []
errors = []
required = schema.get("required", [])
for key, sub in schema["properties"].items():
subpath = path + f".{key}"
if key in required and isinstance(sub, dict) and "default" in sub:
errors.append((subpath, "default provided for required value"))
if key not in required and isinstance(sub, dict) and "default" not in sub:
errors.append((subpath, "default not provided for non-required value"))
return errors
@register_linter
def check_default_locations(schema: dict, path: str, ctx: LintContext) -> Errors:
"""
This is a bit artificial, but it's much easier to write the defaulting logic if all default
values are placed in a consistent location.
They should only ever be found at: <root>.properties.<key>.default
"""
if not isinstance(schema, dict) or "properties" not in schema:
return []
errors = []
for key, sub in schema["properties"].items():
subpath = path + f".{key}"
if isinstance(sub, dict) and "default" in sub:
if sub["default"] == "null":
errors.append(
(
subpath + ".default",
"default is the literal 'null' string, probable typo",
)
)
elif (
not re.match("^<[^>]*>\\.[^.]*$", subpath)
and sub["default"] is not None
):
# This is pretty valid in json-schema normally, but it makes reading defaults
# out of json-schema (which we need in multiple languages) much harder.
errors.append(
(subpath + ".default", "non-null default is defined on a subobject")
)
return errors
@register_linter
def check_nullable(schema: dict, path: str, ctx: LintContext) -> Errors:
    """Non-required fields must be nullable; required fields must be non-nullable."""
if ctx.in_checks:
return []
if not isinstance(schema, dict) or "properties" not in schema:
return []
errors = []
for key, sub in schema["properties"].items():
if sub is True:
# Don't complain about the universal match (true).
continue
subpath = path + f".{key}"
# Make sure that nullability matches the requiredness.
if is_required(schema, key) and is_nullable(schema, key):
errors.append((subpath, "required property is nullable"))
if not is_required(schema, key) and not is_nullable(schema, key):
errors.append((subpath, "non-required property is not nullable"))
# Make sure that $refs are optional on nullable objects.
if is_nullable(schema, key) and "$ref" in sub:
errors.append((subpath, "nullable $ref should be an optionalRef"))
if not is_nullable(schema, key) and "optionalRef" in sub:
errors.append((subpath, "non-nullable optionalRef should be a plain $ref"))
return errors
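def _example_check_nullable():
    """Hedged sketch (added for illustration, not one of the registered
    linters): a non-required property whose type does not admit "null" is
    reported by check_nullable().  The path and filepath strings here are
    placeholders."""
    schema = {
        "type": "object",
        "required": [],
        "properties": {"name": {"type": "string", "default": None}},
    }
    ctx = LintContext(schema, "<example>", toplevel=False, in_checks=False, filepath="example.json")
    errors = check_nullable(schema, "<example>", ctx)
    assert errors == [("<example>.name", "non-required property is not nullable")]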
@register_linter
def check_types_and_keywords(schema: dict, path: str, ctx: LintContext) -> Errors:
if "type" not in schema:
return []
types = schema["type"]
if not isinstance(types, list):
types = [types]
for typ in types:
if typ not in SUPPORTED_KEYWORDS_BY_TYPE:
return [(path, f"unsupported type: {typ}")]
keys = set(schema.keys()).difference(TOPLEVEL_KEYWORDS)
keys.remove("type")
for typ in types:
keys = keys.difference(SUPPORTED_KEYWORDS_BY_TYPE[typ])
errors = []
for kw in keys:
errors.append((path, f"{kw} not allowed in schema of type {typ}"))
return errors
@register_linter
def check_union(schema: dict, path: str, ctx: LintContext) -> Errors:
if "union" not in schema:
return []
errors = []
for idx, sub in enumerate(schema["union"]["items"]):
subpath = path + f"union.items.[{idx}]"
if not isinstance(sub, dict):
errors.append((subpath, "is not a json object"))
continue
if "unionKey" not in sub:
errors.append((subpath, "has no unionKey"))
continue
if not isinstance(sub["unionKey"], str):
errors.append((subpath, "unionKey is not a string"))
continue
return errors
@register_linter
def check_conditional(schema: dict, path: str, ctx: LintContext) -> Errors:
if "conditional" not in schema:
return []
conditional = schema["conditional"]
subpath = path + ".conditional"
errors = []
if "when" not in conditional and "unless" not in conditional:
        errors.append((subpath, "has no when clause or unless clause"))
    if "when" in conditional and "unless" in conditional:
        errors.append((subpath, "has both a when clause and an unless clause"))
if "enforce" not in conditional:
errors.append((subpath, "has no enforce clause"))
return errors
@register_linter
def check_compareProperties(schema: dict, path: str, ctx: LintContext) -> Errors:
if "compareProperties" not in schema:
return []
compare = schema["compareProperties"]
subpath = path + ".compareProperties"
errors = [] # type: Errors
if "type" not in compare:
errors.append((subpath, "has no type"))
if "a" not in compare:
errors.append((subpath, "has no a"))
if "b" not in compare:
errors.append((subpath, "has no b"))
    if "type" in compare and compare["type"] not in {
"a<b",
"same_units",
"length_a<length_b",
"a_is_subdir_of_b",
}:
errors.append((subpath, f'invalid type: {compare["type"]}'))
return errors
def iter_subdict(schema: dict, path: str, key: str, ctx: LintContext) -> Errors:
"""Helper function to iter_schema()."""
if key not in schema:
return []
child = schema[key]
path += f".{key}"
if not isinstance(child, dict):
        return [(path, f"expected a dict but got a {type(child).__name__}")]
errors = []
for key, sub in child.items():
errors += iter_schema(sub, path + f".{key}", ctx)
return errors
def iter_sublist(schema: dict, path: str, key: str, ctx: LintContext) -> Errors:
"""Helper function to iter_schema()."""
if key not in schema:
return []
child = schema[key]
path += f".{key}"
if not isinstance(child, list):
return [(path, f"expected a list but got a {type(child).__name__}")]
errors = []
for idx, sub in enumerate(child):
errors += iter_schema(sub, path + f"[{idx}]", ctx)
return errors
def iter_subschema(schema: dict, path: str, key: str, ctx: LintContext) -> Errors:
"""Helper function to iter_schema()."""
if key not in schema:
return []
child = schema[key]
path += f".{key}"
return iter_schema(child, path, ctx)
def iter_union(schema: dict, path: str, ctx: LintContext) -> Errors:
"""Helper function to iter_schema()."""
if "union" not in schema:
return []
child = schema["union"]
path += ".union"
if not isinstance(child, dict):
return [(path, f"expected a dict but got a {type(child).__name__}")]
return iter_sublist(child, path, "items", ctx)
def iter_schema(
schema: dict,
path: str,
ctx: Optional[LintContext] = None,
in_checks: bool = False,
filepath: Optional[str] = None,
) -> Errors:
"""
Iterate through structural elements of a schema. In the following example:
{
"type": "string",
"required": ["meh"],
"additionalProperties": false,
"properties": {
"meh": { "const": "some_val" }
}
}
... the root object, the `false`, and the `{ "const": "some_val" }` are each structural.
    Everything else is content-related and non-structural.
"""
if not isinstance(schema, (dict, bool)):
return [(path, "schema should be a dictionary or a bool")]
# True or False are special.
if isinstance(schema, bool):
return []
errors = []
# Apply linters to this structural element.
if ctx is None:
assert filepath, "filepath must be provided when ctx is None"
ctx = LintContext(
schema, path, toplevel=True, in_checks=in_checks, filepath=filepath
)
else:
ctx = LintContext(schema, path, False, ctx.in_checks, ctx.filepath)
for linter in linters:
try:
errors += linter(schema, path, ctx)
except Exception as e:
raise ValueError(
f"error processing schema:\n{json.dumps(schema, indent=4)}"
) from e
# Descend into child dicts of structural elements.
for kw in ["properties"]:
errors += iter_subdict(schema, path, kw, ctx)
for kw in ["checks"]:
ctx.in_checks = True
errors += iter_subdict(schema, path, kw, ctx)
# Descend into child lists of structural elements.
for kw in ["oneOf", "anyOf", "allOf"]:
errors += iter_sublist(schema, path, kw, ctx)
# Descend directly into child structural elements.
for kw in ["items", "additionalProperties", "not"]:
errors += iter_subschema(schema, path, kw, ctx)
# Descend into custom structural elements.
errors += iter_union(schema, path, ctx)
return errors
def fmt(files: List[str], reformat: bool) -> Errors:
errors = []
for file in files:
with open(file) as f:
text = f.read()
jobj = json.loads(text)
# Apply the same linting that `python -mjson.tool` would apply.
fixed = json.dumps(jobj, sort_keys=False, indent=4) + "\n"
if fixed != text:
if reformat:
with open(file, "w") as f:
f.write(fixed)
else:
doc = os.path.relpath(file, os.path.dirname(__file__))
errors.append((f"<{doc}>", "would reformat"))
return errors
def lint(files: List[str], reformat: bool) -> List[str]:
assert files, "no files provided"
invalids = []
errors = []
for file in files:
with open(file) as f:
try:
schema = json.loads(f.read())
except json.decoder.JSONDecodeError as e:
invalids.append(f"{file} not valid: {e}")
continue
doc = os.path.relpath(file, os.path.dirname(__file__))
try:
errors += iter_schema(
schema, f"<{doc}>", in_checks=doc.startswith("check-"), filepath=doc
)
except Exception as e:
raise ValueError(f"failure processing {file}") from e
# Exit now if there are invalid errors.
if invalids:
return invalids
errors += fmt(files, reformat)
if errors:
pathwidth = max(len(path) for path, error in errors)
msgs = ["%-*s %s" % (pathwidth, path, error) for path, error in errors]
return sorted(msgs)
return []
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="static analysis for json schema")
parser.add_argument("files", nargs="*", help="files to check")
parser.add_argument("--check", action="store_true", help="no auto-reformatting")
args = parser.parse_args()
errors = lint(args.files, reformat=not args.check)
if errors:
print("\n".join(errors), file=sys.stderr)
sys.exit(1)
avg_line_length: 28.822642 | max_line_length: 95 | alphanum_fraction: 0.590469

hexsha: 55adaa37914aad6650d00592f18f8ceeb8fb8b82 | size: 11,214 | ext: py | lang: Python
max_stars:  epd4in2.py | huanliang19/EinkClock @ b58e4784051f1fa6c5a6e9516703d69a9d169996 | licenses: ["Unlicense"] | stars: 1 (2019-09-25T03:38:19.000Z to 2019-09-25T03:38:19.000Z)
max_issues: epd4in2.py | huanliang19/EinkClock @ b58e4784051f1fa6c5a6e9516703d69a9d169996 | licenses: ["Unlicense"] | issues: 1 (2019-09-22T17:59:15.000Z to 2019-09-22T17:59:15.000Z)
max_forks:  epd4in2.py | domi3006/python3-weather-epaper @ 87511a271ad056f02b0b2dba90789e16c65bf25b | licenses: ["CC-BY-3.0", "MIT"] | forks: null
content:
# //*****************************************************************************
# * | File : EPD_1in54.py
# * | Author : Waveshare team
# * | Function : Electronic paper driver
# * | Info :
# *----------------
# * | This version: V3.0
# * | Date : 2018-11-06
# * | Info : python2 demo
# * 1.Remove:
# digital_write(self, pin, value)
# digital_read(self, pin)
# delay_ms(self, delaytime)
# set_lut(self, lut)
# self.lut = self.lut_full_update
# * 2.Change:
# display_frame -> TurnOnDisplay
# set_memory_area -> SetWindow
# set_memory_pointer -> SetCursor
# get_frame_buffer -> getbuffer
# set_frame_memory -> display
# * 3.How to use
# epd = epd2in7.EPD()
# epd.init(epd.lut_full_update)
# image = Image.new('1', (epd1in54.EPD_WIDTH, epd1in54.EPD_HEIGHT), 255)
# ...
# drawing ......
# ...
# epd.display(getbuffer(image))
# ******************************************************************************//
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import epdconfig
from PIL import Image
import RPi.GPIO as GPIO
# Display resolution
EPD_WIDTH = 400
EPD_HEIGHT = 300
# GDEW042T2 commands
PANEL_SETTING = 0x00
POWER_SETTING = 0x01
POWER_OFF = 0x02
POWER_OFF_SEQUENCE_SETTING = 0x03
POWER_ON = 0x04
POWER_ON_MEASURE = 0x05
BOOSTER_SOFT_START = 0x06
DEEP_SLEEP = 0x07
DATA_START_TRANSMISSION_1 = 0x10
DATA_STOP = 0x11
DISPLAY_REFRESH = 0x12
DATA_START_TRANSMISSION_2 = 0x13
LUT_FOR_VCOM = 0x20
LUT_WHITE_TO_WHITE = 0x21
LUT_BLACK_TO_WHITE = 0x22
LUT_WHITE_TO_BLACK = 0x23
LUT_BLACK_TO_BLACK = 0x24
PLL_CONTROL = 0x30
TEMPERATURE_SENSOR_COMMAND = 0x40
TEMPERATURE_SENSOR_SELECTION = 0x41
TEMPERATURE_SENSOR_WRITE = 0x42
TEMPERATURE_SENSOR_READ = 0x43
VCOM_AND_DATA_INTERVAL_SETTING = 0x50
LOW_POWER_DETECTION = 0x51
TCON_SETTING = 0x60
RESOLUTION_SETTING = 0x61
GSST_SETTING = 0x65
GET_STATUS = 0x71
AUTO_MEASUREMENT_VCOM = 0x80
READ_VCOM_VALUE = 0x81
VCM_DC_SETTING = 0x82
PARTIAL_WINDOW = 0x90
PARTIAL_IN = 0x91
PARTIAL_OUT = 0x92
PROGRAM_MODE = 0xA0
ACTIVE_PROGRAMMING = 0xA1
READ_OTP = 0xA2
POWER_SAVING = 0xE3
class EPD:
def __init__(self):
self.reset_pin = epdconfig.RST_PIN
self.dc_pin = epdconfig.DC_PIN
self.busy_pin = epdconfig.BUSY_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
lut_vcom0 = [
0x00, 0x17, 0x00, 0x00, 0x00, 0x02,
0x00, 0x17, 0x17, 0x00, 0x00, 0x02,
0x00, 0x0A, 0x01, 0x00, 0x00, 0x01,
0x00, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_ww = [
0x40, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x40, 0x0A, 0x01, 0x00, 0x00, 0x01,
0xA0, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_bw = [
0x40, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x40, 0x0A, 0x01, 0x00, 0x00, 0x01,
0xA0, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_wb = [
0x80, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x80, 0x0A, 0x01, 0x00, 0x00, 0x01,
0x50, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_bb = [
0x80, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x80, 0x0A, 0x01, 0x00, 0x00, 0x01,
0x50, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
# Hardware reset
def reset(self):
epdconfig.digital_write(self.reset_pin, GPIO.HIGH)
epdconfig.delay_ms(200)
epdconfig.digital_write(self.reset_pin, GPIO.LOW) # module reset
epdconfig.delay_ms(200)
epdconfig.digital_write(self.reset_pin, GPIO.HIGH)
epdconfig.delay_ms(200)
def send_command(self, command):
epdconfig.digital_write(self.dc_pin, GPIO.LOW)
epdconfig.spi_writebyte([command])
def send_data(self, data):
epdconfig.digital_write(self.dc_pin, GPIO.HIGH)
epdconfig.spi_writebyte([data])
def wait_until_idle(self):
while(epdconfig.digital_read(self.busy_pin) == 0): # 0: idle, 1: busy
epdconfig.delay_ms(100)
def set_lut(self):
self.send_command(LUT_FOR_VCOM) # vcom
for count in range(0, 44):
self.send_data(self.lut_vcom0[count])
self.send_command(LUT_WHITE_TO_WHITE) # ww --
for count in range(0, 42):
self.send_data(self.lut_ww[count])
self.send_command(LUT_BLACK_TO_WHITE) # bw r
for count in range(0, 42):
self.send_data(self.lut_bw[count])
self.send_command(LUT_WHITE_TO_BLACK) # wb w
for count in range(0, 42):
self.send_data(self.lut_bb[count])
self.send_command(LUT_BLACK_TO_BLACK) # bb b
for count in range(0, 42):
self.send_data(self.lut_wb[count])
def init(self):
if (epdconfig.module_init() != 0):
return -1
# EPD hardware init start
self.reset()
self.send_command(POWER_SETTING)
self.send_data(0x03) # VDS_EN, VDG_EN
self.send_data(0x00) # VCOM_HV, VGHL_LV[1], VGHL_LV[0]
self.send_data(0x2b) # VDH
self.send_data(0x2b) # VDL
self.send_command(BOOSTER_SOFT_START)
self.send_data(0x17)
self.send_data(0x17)
self.send_data(0x17)
self.send_command(POWER_ON)
self.wait_until_idle()
self.send_command(PANEL_SETTING)
self.send_data(0xbf) # KW-BF KWR-AF BWROTP 0f
self.send_data(0x0d)
self.send_command(PLL_CONTROL)
self.send_data(0x3c) # 3A 100HZ 29 150Hz 39 200HZ 31 171HZ
self.send_command(0x61); # resolution setting
self.send_data(0x01);
self.send_data(0x90); # 128
self.send_data(0x01);
self.send_data(0x2c);
self.send_command(0x82); # vcom_DC setting
self.send_data(0x28);
self.send_command(0X50); # VCOM AND DATA INTERVAL SETTING
self.send_data(0x97); # 97white border 77black border VBDF 17|D7 VBDW 97 VBDB 57 VBDF F7 VBDW 77 VBDB 37 VBDR B7
self.set_lut()
# EPD hardware init end
return 0
def getbuffer(self, image):
buf = [0xFF] * ((self.width//8) * self.height)
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
pixels = image_monocolor.load()
# print "imwidth = %d, imheight = %d",imwidth,imheight
if(imwidth == self.width and imheight == self.height):
print("Horizontal")
for y in range(imheight):
for x in range(imwidth):
# Set the bits for the column of pixels at the current position.
if pixels[x, y] == 0:
buf[(x + y * self.width) // 8] &= ~(0x80 >> (x % 8))
elif(imwidth == self.height and imheight == self.width):
print("Vertical")
for y in range(imheight):
for x in range(imwidth):
newx = y
newy = self.height - x - 1
if pixels[x, y] == 0:
buf[(newx + newy*self.width) // 8] &= ~(0x80 >> (y % 8))
return buf
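    def _example_getbuffer_packing(self):
        """Hedged usage sketch (added for illustration, not part of the original
        driver): getbuffer() packs eight horizontal pixels per byte, MSB first,
        clearing the bit for every black (0) pixel, so an all-white full-size
        image yields width * height / 8 bytes of 0xFF."""
        image = Image.new('1', (self.width, self.height), 255)
        buf = self.getbuffer(image)
        assert len(buf) == self.width * self.height // 8
        assert all(byte == 0xFF for byte in buf)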
def display(self, image):
self.send_command(DATA_START_TRANSMISSION_1)
for i in range(0, self.width * self.height // 8):
self.send_data(0xFF)
self.send_command(DATA_START_TRANSMISSION_2)
for i in range(0, self.width * self.height // 8):
self.send_data(image[i])
self.send_command(DISPLAY_REFRESH)
self.wait_until_idle()
def Clear(self, color):
self.send_command(DATA_START_TRANSMISSION_1)
for i in range(0, self.width * self.height // 8):
self.send_data(0xFF)
self.send_command(DATA_START_TRANSMISSION_2)
for i in range(0, self.width * self.height // 8):
self.send_data(0xFF)
self.send_command(DISPLAY_REFRESH)
self.wait_until_idle()
def sleep(self):
self.send_command(POWER_OFF)
self.wait_until_idle()
self.send_command(DEEP_SLEEP)
self.send_data(0XA5)
### END OF FILE ###
avg_line_length: 40.193548 | max_line_length: 125 | alphanum_fraction: 0.540396

hexsha: 90c9952254dcaf471fea2e2c11970e39ae0c68d7 | size: 1,168 | ext: py | lang: Python
max_stars:  utils/utils.py | gregbugaj/form-processor @ 0c803de43a98b4a02efa956803e64793995256ff | licenses: ["MIT"] | stars: null
max_issues: utils/utils.py | gregbugaj/form-processor @ 0c803de43a98b4a02efa956803e64793995256ff | licenses: ["MIT"] | issues: 1 (2021-11-09T11:11:32.000Z to 2021-11-09T11:11:32.000Z)
max_forks:  utils/utils.py | gregbugaj/form-processor @ 0c803de43a98b4a02efa956803e64793995256ff | licenses: ["MIT"] | forks: null
content:
import os
import time
from PIL import Image
import cv2
import numpy as np
def ensure_exists(dir):
if not os.path.exists(dir):
os.makedirs(dir)
return dir
def current_milli_time():
return round(time.time() * 1000)
def make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
    print("The image size needs to be a multiple of %d. "
          "The loaded image size was (%d, %d), so it was adjusted to "
          "(%d, %d). This adjustment will be done to all images "
          "whose sizes are not multiples of base" % (base, ow, oh, w, h))
return img.resize((w, h), method)
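# Illustrative example (added for clarity, not from the original file): with base=4,
# a hypothetical 1023x767 image is rounded to the nearest multiples of 4 and resized
# to 1024x768 by make_power_2, while an already-aligned 1024x768 image is returned
# unchanged.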
def make_power_2_cv2(img, base, method=Image.BICUBIC):
pil_snippet = Image.fromarray(img)
pil_snippet = make_power_2(pil_snippet, base=base, method=method)
cv_snip = np.array(pil_snippet)
cv_img = cv2.cvtColor(cv_snip, cv2.COLOR_RGB2BGR)# convert RGB to BGR
print(f'InShape, OutShape : {img.shape} {cv_img.shape}')
return cv_img
| 27.162791
| 79
| 0.61387
|
cc4fa99a07cdc59ca099df04a7de210c423980ed
| 83
|
py
|
Python
|
tests/periodicities/Day/Cycle_Day_1600_D_12.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/periodicities/Day/Cycle_Day_1600_D_12.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/periodicities/Day/Cycle_Day_1600_D_12.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.periodicities.period_test as per
per.buildModel((12, 'D', 1600))
| 16.6
| 45
| 0.722892
|
1778ee80eb00b2788eccf2ad62b2f8ff04f7f606
| 4,242
|
py
|
Python
|
detection/grad_cam.py
|
innovator1311/Grad-CAM.pytorch
|
9923e4c52842dd486582e24a405b49b2818dc983
|
[
"Apache-2.0"
] | null | null | null |
detection/grad_cam.py
|
innovator1311/Grad-CAM.pytorch
|
9923e4c52842dd486582e24a405b49b2818dc983
|
[
"Apache-2.0"
] | null | null | null |
detection/grad_cam.py
|
innovator1311/Grad-CAM.pytorch
|
9923e4c52842dd486582e24a405b49b2818dc983
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@File : grad_cam.py
@File    : grad_cam.py
@Time    : 2020/3/14 4:06 PM
@Author : yizuotian
@Description :
"""
import cv2
import numpy as np
class GradCAM(object):
"""
1: 网络不更新梯度,输入需要梯度更新
2: 使用目标类别的得分做反向传播
"""
def __init__(self, net, layer_name):
self.net = net.faster_rcnn
self.layer_name = layer_name
self.feature = None
self.gradient = None
self.net.eval()
self.handlers = []
self._register_hook()
def _get_features_hook(self, module, input, output):
self.feature = output
print("feature shape:{}".format(output.size()))
def _get_grads_hook(self, module, input_grad, output_grad):
"""
:param input_grad: tuple, input_grad[0]: None
input_grad[1]: weight
input_grad[2]: bias
        :param output_grad: tuple of length 1
:return:
"""
self.gradient = output_grad[0]
def _register_hook(self):
for (name, module) in self.net.named_modules():
if name == self.layer_name:
self.handlers.append(module.register_forward_hook(self._get_features_hook))
self.handlers.append(module.register_backward_hook(self._get_grads_hook))
def remove_handlers(self):
for handle in self.handlers:
handle.remove()
def __call__(self, inputs, index=0):
"""
:param inputs: {"image": [C,H,W], "height": height, "width": width}
        :param index: index of the target bounding box
:return:
"""
self.net.zero_grad()
output = self.net.inference([inputs])
print(output)
score = output[0]['instances'].scores[index]
        proposal_idx = output[0]['instances'].indices[index]  # index of the proposal this box came from
score.backward()
gradient = self.gradient[proposal_idx].cpu().data.numpy() # [C,H,W]
weight = np.mean(gradient, axis=(1, 2)) # [C]
feature = self.feature[proposal_idx].cpu().data.numpy() # [C,H,W]
cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
cam = np.sum(cam, axis=0) # [H,W]
cam = np.maximum(cam, 0) # ReLU
        # normalize values to [0, 1]
cam -= np.min(cam)
cam /= np.max(cam)
# resize to 224*224
box = output[0]['instances'].pred_boxes.tensor[index].detach().numpy().astype(np.int32)
x1, y1, x2, y2 = box
cam = cv2.resize(cam, (x2 - x1, y2 - y1))
class_id = output[0]['instances'].pred_classes[index].detach().numpy()
return cam, box, class_id
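# Clarifying note (added, not part of the original file): the weighting above is the
# standard Grad-CAM recipe -- each channel weight is the spatial mean of the score
# gradients, w_c = mean_{i,j}(dS/dA_c[i, j]), and the map is
# cam = ReLU(sum_c w_c * A_c), which is then min-max normalized and resized to the
# predicted box.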
class GradCamPlusPlus(GradCAM):
def __init__(self, net, layer_name):
super(GradCamPlusPlus, self).__init__(net, layer_name)
def __call__(self, inputs, index=0):
"""
:param inputs: {"image": [C,H,W], "height": height, "width": width}
        :param index: index of the target bounding box
:return:
"""
self.net.zero_grad()
output = self.net.inference([inputs])
print(output)
score = output[0]['instances'].scores[index]
        proposal_idx = output[0]['instances'].indices[index]  # index of the proposal this box came from
score.backward()
gradient = self.gradient[proposal_idx].cpu().data.numpy() # [C,H,W]
gradient = np.maximum(gradient, 0.) # ReLU
        indicate = np.where(gradient > 0, 1., 0.)  # indicator function
        norm_factor = np.sum(gradient, axis=(1, 2))  # [C] normalization factor
        for i in range(len(norm_factor)):
            norm_factor[i] = 1. / norm_factor[i] if norm_factor[i] > 0. else 0.  # avoid division by zero
alpha = indicate * norm_factor[:, np.newaxis, np.newaxis] # [C,H,W]
weight = np.sum(gradient * alpha, axis=(1, 2)) # [C] alpha*ReLU(gradient)
feature = self.feature[proposal_idx].cpu().data.numpy() # [C,H,W]
cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
cam = np.sum(cam, axis=0) # [H,W]
# cam = np.maximum(cam, 0) # ReLU
        # normalize values to [0, 1]
cam -= np.min(cam)
cam /= np.max(cam)
# resize to box scale
box = output[0]['instances'].pred_boxes.tensor[index].detach().numpy().astype(np.int32)
x1, y1, x2, y2 = box
cam = cv2.resize(cam, (x2 - x1, y2 - y1))
return cam
| 32.630769
| 95
| 0.562235
|
5113ac1082bc03f1922635529d99a9faa95678b3
| 6,901
|
py
|
Python
|
tests/ut/python/dataset/test_ten_crop.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 55
|
2020-12-17T10:26:06.000Z
|
2022-03-28T07:18:26.000Z
|
tests/ut/python/dataset/test_ten_crop.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_ten_crop.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 14
|
2021-01-29T02:39:47.000Z
|
2022-03-23T05:00:26.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing TenCrop in DE
"""
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as vision
from mindspore import log as logger
from util import visualize_list, save_and_check_md5
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def util_test_ten_crop(crop_size, vertical_flip=False, plot=False):
"""
Utility function for testing TenCrop. Input arguments are given by other tests
"""
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [
vision.Decode(),
vision.ToTensor(),
]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [
vision.Decode(),
vision.TenCrop(crop_size, use_vertical_flip=vertical_flip),
lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
]
transform_2 = mindspore.dataset.transforms.py_transforms.Compose(transforms_2)
data2 = data2.map(operations=transform_2, input_columns=["image"])
num_iter = 0
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
num_iter += 1
image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_2 = item2["image"]
logger.info("shape of image_1: {}".format(image_1.shape))
logger.info("shape of image_2: {}".format(image_2.shape))
logger.info("dtype of image_1: {}".format(image_1.dtype))
logger.info("dtype of image_2: {}".format(image_2.dtype))
if plot:
visualize_list(np.array([image_1] * 10), (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1))
# The output data should be of a 4D tensor shape, a stack of 10 images.
assert len(image_2.shape) == 4
assert image_2.shape[0] == 10
def test_ten_crop_op_square(plot=False):
"""
Tests TenCrop for a square crop
"""
logger.info("test_ten_crop_op_square")
util_test_ten_crop(200, plot=plot)
def test_ten_crop_op_rectangle(plot=False):
"""
Tests TenCrop for a rectangle crop
"""
logger.info("test_ten_crop_op_rectangle")
util_test_ten_crop((200, 150), plot=plot)
def test_ten_crop_op_vertical_flip(plot=False):
"""
Tests TenCrop with vertical flip set to True
"""
logger.info("test_ten_crop_op_vertical_flip")
util_test_ten_crop(200, vertical_flip=True, plot=plot)
def test_ten_crop_md5():
"""
    Tests TenCrop for giving the same results in multiple runs.
Since TenCrop is a deterministic function, we expect it to return the same result for a specific input every time
"""
logger.info("test_ten_crop_md5")
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [
vision.Decode(),
vision.TenCrop((200, 100), use_vertical_flip=True),
lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
]
transform_2 = mindspore.dataset.transforms.py_transforms.Compose(transforms_2)
data2 = data2.map(operations=transform_2, input_columns=["image"])
# Compare with expected md5 from images
filename = "ten_crop_01_result.npz"
save_and_check_md5(data2, filename, generate_golden=GENERATE_GOLDEN)
def test_ten_crop_list_size_error_msg():
"""
Tests TenCrop error message when the size arg has more than 2 elements
"""
logger.info("test_ten_crop_list_size_error_msg")
with pytest.raises(TypeError) as info:
_ = [
vision.Decode(),
vision.TenCrop([200, 200, 200]),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
]
error_msg = "Size should be a single integer or a list/tuple (h, w) of length 2."
assert error_msg == str(info.value)
def test_ten_crop_invalid_size_error_msg():
"""
Tests TenCrop error message when the size arg is not positive
"""
logger.info("test_ten_crop_invalid_size_error_msg")
with pytest.raises(ValueError) as info:
_ = [
vision.Decode(),
vision.TenCrop(0),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
]
error_msg = "Input is not within the required interval of (1 to 16777216)."
assert error_msg == str(info.value)
with pytest.raises(ValueError) as info:
_ = [
vision.Decode(),
vision.TenCrop(-10),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
]
assert error_msg == str(info.value)
def test_ten_crop_wrong_img_error_msg():
"""
Tests TenCrop error message when the image is not in the correct format.
"""
logger.info("test_ten_crop_wrong_img_error_msg")
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
vision.Decode(),
vision.TenCrop(200),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"])
with pytest.raises(RuntimeError) as info:
data.create_tuple_iterator(num_epochs=1).__next__()
error_msg = "TypeError: __call__() takes 2 positional arguments but 11 were given"
# error msg comes from ToTensor()
assert error_msg in str(info.value)
if __name__ == "__main__":
test_ten_crop_op_square(plot=True)
test_ten_crop_op_rectangle(plot=True)
test_ten_crop_op_vertical_flip(plot=True)
test_ten_crop_md5()
test_ten_crop_list_size_error_msg()
test_ten_crop_invalid_size_error_msg()
test_ten_crop_wrong_img_error_msg()
| 35.756477
| 117
| 0.696276
|
dfd5afbf9e8c64c70b770fcd652b44299c23b4dc
| 78
|
py
|
Python
|
isshub/domain/contexts/code_repository/entities/namespace/tests/__init__.py
|
kernicPanel/isshub
|
841496f6fa9cb579709fa007414e0a40918f8af7
|
[
"MIT"
] | 1
|
2021-02-11T00:49:15.000Z
|
2021-02-11T00:49:15.000Z
|
isshub/domain/contexts/code_repository/entities/namespace/tests/__init__.py
|
kernicPanel/isshub
|
841496f6fa9cb579709fa007414e0a40918f8af7
|
[
"MIT"
] | 4
|
2019-06-07T19:06:03.000Z
|
2020-10-04T10:09:46.000Z
|
isshub/domain/contexts/code_repository/entities/namespace/tests/__init__.py
|
kernicPanel/isshub
|
841496f6fa9cb579709fa007414e0a40918f8af7
|
[
"MIT"
] | 1
|
2019-09-15T09:00:52.000Z
|
2019-09-15T09:00:52.000Z
|
"""Package holding the tests for the ``Namespace`` code_repository entity."""
| 39
| 77
| 0.74359
|
54d4bda6b47a4e89540e6b2ff57905dc57240723
| 40,431
|
py
|
Python
|
test/functional/test_framework/messages.py
|
KnoxFS/Wallet-5.3.2.0
|
e69b700127e78cc5fda4f012c2609ab1fa7a276d
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/messages.py
|
KnoxFS/Wallet-5.3.2.0
|
e69b700127e78cc5fda4f012c2609ab1fa7a276d
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/messages.py
|
KnoxFS/Wallet-5.3.2.0
|
e69b700127e78cc5fda4f012c2609ab1fa7a276d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70922
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 2000000
CURRENT_BLK_VERSION = 10
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
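# Illustrative round-trip (added for clarity, not part of the original file): the
# compact-size prefix uses 1, 3, 5, or 9 bytes depending on the value, and the two
# helpers above invert each other, e.g.
#
#   from io import BytesIO
#   assert len(ser_compact_size(100)) == 1 and len(ser_compact_size(300)) == 3
#   assert deser_compact_size(BytesIO(ser_compact_size(300))) == 300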
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
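# Illustrative usage (added for clarity, not part of the original file): any of the
# serializable objects defined below round-trips through hex, e.g.
#
#   tx_copy = FromHex(CTransaction(), ToHex(tx))
#
# where `tx` is a hypothetical, already-populated CTransaction instance.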
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "MSG_ERROR",
1: "MSG_TX",
2: "MSG_BLOCK",
3: "MSG_FILTERED_BLOCK",
4: "MSG_TXLOCK_REQUEST",
5: "MSG_TXLOCK_VOTE",
6: "MSG_SPORK",
7: "MSG_MASTERNODE_WINNER",
8: "MSG_MASTERNODE_SCANNING_ERROR",
9: "MSG_BUDGET_VOTE",
10: "MSG_BUDGET_PROPOSAL",
11: "MSG_BUDGET_FINALIZED",
12: "MSG_BUDGET_FINALIZED_VOTE",
13: "MSG_MASTERNODE_QUORUM",
14: "MSG_MASTERNODE_QUORUM",
15: "MSG_MASTERNODE_ANNOUNCE",
16: "MSG_MASTERNODE_PING",
17: "MSG_DSTX",
18: "MSG_PUBCOINS",
19: "MSG_GENWIT",
20: "MSG_ACC_VALUE"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def serialize_uniqueness(self):
r = b""
r += struct.pack("<I", self.n)
r += ser_uint256(self.hash)
return r
def deserialize_uniqueness(self, f):
self.n = struct.unpack("<I", f.read(4))[0]
self.hash = deser_uint256(f)
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
NullOutPoint = COutPoint(0, 0xffffffff)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
def is_zerocoinspend(self):
return bytes_to_hex_str(self.scriptSig)[:2] == "c2"
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.sapData = b""
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sapData = tx.sapData
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
if self.nVersion >= 2:
self.sapData = deser_string(f)
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
if self.nVersion >= 2:
r += ser_string(self.sapData)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def is_coinbase(self):
return (
len(self.vin) == 1 and
self.vin[0].prevout == NullOutPoint and
(not self.vin[0].is_zerocoinspend())
)
def is_coinstake(self):
return (
len(self.vin) == 1 and
len(self.vout) >= 2 and
self.vout[0] == CTxOut()
)
def from_hex(self, hexstring):
f = BytesIO(hex_str_to_bytes(hexstring))
self.deserialize(f)
def spends(self, outpoint):
return len([x for x in self.vin if
x.prevout.hash == outpoint.hash and x.prevout.n == outpoint.n]) > 0
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.hashFinalSaplingRoot = header.hashFinalSaplingRoot
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = CURRENT_BLK_VERSION
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.hashFinalSaplingRoot = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
if self.nVersion >= 8:
self.hashFinalSaplingRoot = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
if self.nVersion >= 8:
r += ser_uint256(self.hashFinalSaplingRoot)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
if self.nVersion >= 8:
r += ser_uint256(self.hashFinalSaplingRoot)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# KFX
def solve_stake(self, stakeInputs, prevModifier):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for uniqueness in stakeInputs:
nvalue, _, prevTime = stakeInputs[uniqueness]
target = int(target0 * nvalue / 100) % 2**256
data = b""
# always modifier V2 (256 bits) on regtest
data += ser_uint256(prevModifier)
data += struct.pack("<I", prevTime)
# prevout is CStake uniqueness
data += uniqueness
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = uniqueness
loop = False
break
if loop:
self.nTime += 1
return True
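    # Clarifying note (added, not in the original file): solve_stake() scales the base
    # difficulty target by the stake's value (nvalue / 100) and brute-forces nTime until
    # hash256(modifier || prevTime || uniqueness || nTime) falls at or below that
    # weighted target, imitating proof-of-stake kernel hashing on regtest.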
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
self.sig_key = None # not serialized / used only to re_sign
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
self.sig_key = None
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
if self.nVersion >= 8:
data += ser_uint256(self.hashFinalSaplingRoot)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
self.sig_key = key
self.low_s = low_s
def re_sign_block(self):
        if self.sig_key is None:
raise Exception("Unable to re-sign block. Key Not present, use 'sign_block' first.")
return self.sign_block(self.sig_key, self.low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
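    # Worked example (added for clarity, not part of the original file): absolute
    # indexes [1, 4, 6] encode differentially as [1, 2, 1] -- each entry is the gap to
    # the previous absolute index minus one -- and to_absolute() reverses the mapping.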
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
| 29.468659
| 262
| 0.59536
|
c9706f5ab155149137ae122d2738fb415b0f080d
| 6,911
|
py
|
Python
|
ord_interface/search.py
|
open-reaction-database/ord-interface
|
730e851d82afb88212619ac64aaed6fda85c1690
|
[
"Apache-2.0"
] | 5
|
2021-11-30T12:26:36.000Z
|
2022-03-22T04:48:31.000Z
|
ord_interface/search.py
|
open-reaction-database/ord-interface
|
730e851d82afb88212619ac64aaed6fda85c1690
|
[
"Apache-2.0"
] | 16
|
2020-12-12T15:09:42.000Z
|
2022-03-11T16:46:53.000Z
|
ord_interface/search.py
|
open-reaction-database/ord-interface
|
730e851d82afb88212619ac64aaed6fda85c1690
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query web interface to the Open Reaction Database in Postgres.
The client is stateless. Full information is in the URL so results can be
linked. Query parameters are communicated in URL GET params:
component=<pattern;source;(exact|substructure|similarity|smarts)>
The second token specifies whether the predicate should match an input or
an output.
The last token specifies the matching criterion. The default is "exact".
The pattern is a SMILES string, unless the token is "smarts" in which case
the pattern is a SMARTS string.
Component may be repeated any number of times.
reaction_ids=<ids>
reaction_smarts=<smarts>
These query types are mutually exclusive. All query parameters are assumed to
be URL-encoded.
"""
# pylint: disable=too-many-locals
import dataclasses
import os
from typing import NewType, Optional, Tuple
import flask
from ord_schema.visualization import generate_text
from ord_interface import query
app = flask.Flask(__name__, template_folder='.')
POSTGRES_HOST = os.getenv('POSTGRES_HOST', 'localhost')
POSTGRES_PORT = os.getenv('POSTGRES_PORT', '5432')
POSTGRES_USER = os.getenv('POSTGRES_USER', 'ord-postgres')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD', 'ord-postgres')
Query = NewType('Query', query.ReactionQueryBase)
@app.route('/')
def show_root():
"""Shows the web form.
Creates a query to show a set of randomly selected reactions so the
page won't be empty.
"""
command, limit = build_query()
if command is None:
command = query.RandomSampleQuery(100)
query_json = command.json()
try:
results = connect().run_query(command, limit=limit, return_ids=True)
error = None
except query.QueryException as exception:
results = None
error = f'(Error) {exception}'
if results is not None and not results:
results = None
error = 'query did not match any reactions'
return flask.render_template('search.html',
results=results,
error=error,
query=query_json)
@app.route('/id/<reaction_id>')
def show_id(reaction_id):
"""Returns the pbtxt of a single reaction as plain text."""
results = connect().run_query(query.ReactionIdQuery([reaction_id]))
if len(results) == 0:
return flask.abort(404)
return generate_text.generate_summary(reaction=results[0].reaction,
dataset_id=results[0].dataset_id)
@app.route('/render/<reaction_id>')
def render_reaction(reaction_id):
"""Renders a reaction as an HTML table with images and text."""
command = query.ReactionIdQuery([reaction_id])
results = connect().run_query(command)
if len(results) == 0 or len(results) > 1:
return flask.abort(404)
result = results[0]
try:
html = generate_text.generate_html(reaction=result.reaction,
compact=True)
return flask.jsonify(html)
except (ValueError, KeyError):
return flask.jsonify('[Reaction cannot be displayed]')
def connect():
return query.OrdPostgres(dbname='ord',
user=POSTGRES_USER,
password=POSTGRES_PASSWORD,
host=POSTGRES_HOST,
port=int(POSTGRES_PORT))
@app.route('/api/fetch_reactions', methods=['POST'])
def fetch_reactions():
"""Fetches a list of Reactions by ID."""
reaction_ids = flask.request.get_json()
command = query.ReactionIdQuery(reaction_ids)
try:
results = connect().run_query(command)
return flask.jsonify([dataclasses.asdict(result) for result in results])
except query.QueryException as error:
return flask.abort(flask.make_response(str(error), 400))
@app.route('/api/query')
def run_query():
"""Builds and executes a GET query.
Returns:
A serialized Dataset proto containing the matched reactions.
"""
command, limit = build_query()
if command is None:
return flask.abort(flask.make_response('no query defined', 400))
try:
results = connect().run_query(command, limit=limit)
return flask.jsonify([dataclasses.asdict(result) for result in results])
except query.QueryException as error:
return flask.abort(flask.make_response(str(error), 400))
def build_query() -> Tuple[Optional[Query], Optional[int]]:
"""Builds a query from GET parameters.
Returns:
query: ReactionQueryBase subclass instance.
limit: Maximum number of results to return.
"""
dataset_ids = flask.request.args.get('dataset_ids')
reaction_ids = flask.request.args.get('reaction_ids')
reaction_smarts = flask.request.args.get('reaction_smarts')
dois = flask.request.args.get('dois')
components = flask.request.args.getlist('component')
use_stereochemistry = flask.request.args.get('use_stereochemistry')
similarity = flask.request.args.get('similarity')
limit = flask.request.args.get('limit')
if limit is not None:
limit = int(limit)
if dataset_ids is not None:
command = query.DatasetIdQuery(dataset_ids.split(','))
elif reaction_ids is not None:
command = query.ReactionIdQuery(reaction_ids.split(','))
elif reaction_smarts is not None:
command = query.ReactionSmartsQuery(reaction_smarts)
elif dois is not None:
command = query.DoiQuery(dois.split(','))
elif components:
predicates = []
for component in components:
pattern, source, mode_name = component.split(';')
table = query.ReactionComponentPredicate.SOURCE_TO_TABLE[source]
mode = query.ReactionComponentPredicate.MatchMode.from_name(
mode_name)
predicates.append(
query.ReactionComponentPredicate(pattern, table, mode))
kwargs = {}
if use_stereochemistry is not None:
kwargs['do_chiral_sss'] = use_stereochemistry
if similarity is not None:
kwargs['tanimoto_threshold'] = float(similarity)
command = query.ReactionComponentQuery(predicates, **kwargs)
else:
command = None
return command, limit
| 36.183246
| 80
| 0.673274
|
5b512c1f334f5f8cfa016af4b23d348b96c80009
| 3,350
|
py
|
Python
|
sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_blob_fs_source_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_blob_fs_source_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2020-03-06T05:57:16.000Z
|
2020-03-06T05:57:16.000Z
|
sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_blob_fs_source_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .copy_source_py3 import CopySource
class AzureBlobFSSource(CopySource):
"""A copy activity Azure BlobFS source.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param max_concurrent_connections: The maximum concurrent connection count
for the source data store. Type: integer (or Expression with resultType
integer).
:type max_concurrent_connections: object
:param type: Required. Constant filled by server.
:type type: str
:param treat_empty_as_null: Treat empty as null. Type: boolean (or
Expression with resultType boolean).
:type treat_empty_as_null: object
:param skip_header_line_count: Number of header lines to skip from each
blob. Type: integer (or Expression with resultType integer).
:type skip_header_line_count: object
:param recursive: If true, files under the folder path will be read
recursively. Default is true. Type: boolean (or Expression with resultType
boolean).
:type recursive: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'treat_empty_as_null': {'key': 'treatEmptyAsNull', 'type': 'object'},
'skip_header_line_count': {'key': 'skipHeaderLineCount', 'type': 'object'},
'recursive': {'key': 'recursive', 'type': 'object'},
}
def __init__(self, *, additional_properties=None, source_retry_count=None, source_retry_wait=None, max_concurrent_connections=None, treat_empty_as_null=None, skip_header_line_count=None, recursive=None, **kwargs) -> None:
super(AzureBlobFSSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs)
self.treat_empty_as_null = treat_empty_as_null
self.skip_header_line_count = skip_header_line_count
self.recursive = recursive
self.type = 'AzureBlobFSSource'
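# Hedged usage sketch (added for clarity, not part of the generated SDK file):
#
#   source = AzureBlobFSSource(recursive=True, skip_header_line_count=1)
#
# This only sets the keyword fields documented above; all other behavior is
# inherited from CopySource.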
| 48.550725 | 233 | 0.680896 |
6a420ec33eb67abfa43321573d4c3ab6e5336cab | 136 | py | Python
| space_game/AccelerationDirection.py | Iwomichu/probable-giggle | 2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc | ["MIT"] | 1 | 2020-11-30T11:21:21.000Z | 2020-11-30T11:21:21.000Z
| space_game/AccelerationDirection.py | Iwomichu/probable-giggle | 2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc | ["MIT"] | 5 | 2020-11-03T16:46:49.000Z | 2021-01-24T14:29:24.000Z
| space_game/AccelerationDirection.py | Iwomichu/probable-giggle | 2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc | ["MIT"] | null | null | null |
from enum import Enum, auto
class AccelerationDirection(Enum):
LEFT = auto()
RIGHT = auto()
UP = auto()
DOWN = auto()
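# Usage sketch (assumed, not part of the original module): map simple
# WASD-style key strings to acceleration directions.
KEY_TO_DIRECTION = {
    "a": AccelerationDirection.LEFT,
    "d": AccelerationDirection.RIGHT,
    "w": AccelerationDirection.UP,
    "s": AccelerationDirection.DOWN,
}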
| 15.111111 | 34 | 0.610294 |
01e71a417c5ec18e4d0daa2cf70bc02d12cdcfc3 | 79,726 | py | Python
| tccli/services/bmvpc/bmvpc_client.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | ["Apache-2.0"] | null | null | null
| tccli/services/bmvpc/bmvpc_client.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | ["Apache-2.0"] | null | null | null
| tccli/services/bmvpc/bmvpc_client.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.bmvpc.v20180625 import bmvpc_client as bmvpc_client_v20180625
from tencentcloud.bmvpc.v20180625 import models as models_v20180625
def doDownloadCustomerGatewayConfiguration(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DownloadCustomerGatewayConfigurationRequest()
model.from_json_string(json.dumps(args))
rsp = client.DownloadCustomerGatewayConfiguration(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
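# Hypothetical helper (the name _call_action is an assumption and does not
# exist in this module): every do* handler in this file repeats the same flow --
# build credentials and HTTP/client profiles, instantiate "<Action>Request" from
# the versioned models module, call the client method of the same name, and
# print the JSON response. The handlers above and below are equivalent to, e.g.
#     _call_action("DescribeCustomerGateways", args, parsed_globals)
def _call_action(action_name, args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    # Look up the request model class and the client method by the action name.
    model = getattr(models, action_name + "Request")()
    model.from_json_string(json.dumps(args))
    rsp = getattr(client, action_name)(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])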
def doDescribeCustomerGateways(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeCustomerGatewaysRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeCustomerGateways(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateVpcPeerConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateVpcPeerConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateVpcPeerConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAsyncRegisterIps(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AsyncRegisterIpsRequest()
model.from_json_string(json.dumps(args))
rsp = client.AsyncRegisterIps(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRouteTables(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRouteTablesRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeRouteTables(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteHostedInterfaces(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteHostedInterfacesRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteHostedInterfaces(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnbindEipsFromNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnbindEipsFromNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnbindEipsFromNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindEipsToNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindEipsToNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindEipsToNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyVpcPeerConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyVpcPeerConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyVpcPeerConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateVpc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateVpcRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateVpc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteVpnGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteVpnGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteVpnGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRoutePolicies(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRoutePoliciesRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeRoutePolicies(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteVpc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteVpcRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteVpc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteHostedInterface(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteHostedInterfaceRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteHostedInterface(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeregisterIps(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeregisterIpsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeregisterIps(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyRoutePolicy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyRoutePolicyRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyRoutePolicy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateInterfaces(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateInterfacesRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateInterfaces(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyCustomerGatewayAttribute(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyCustomerGatewayAttributeRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyCustomerGatewayAttribute(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteRoutePolicy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteRoutePolicyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteRoutePolicy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateVirtualSubnetWithVlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateVirtualSubnetWithVlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateVirtualSubnetWithVlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyVpnConnectionAttribute(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyVpnConnectionAttributeRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyVpnConnectionAttribute(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteVpnConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteVpnConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteVpnConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSubnetAvailableIps(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeSubnetAvailableIpsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeSubnetAvailableIps(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpcs(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpcsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpcs(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
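# Illustrative invocation (the parameter names are assumptions, not verified
# against the bmvpc API): handlers such as doDescribeVpcs above are normally
# reached through the tccli command line, with args parsed from flags, e.g.
#     tccli bmvpc DescribeVpcs --Offset 0 --Limit 10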
def doUnbindIpsFromNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnbindIpsFromNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnbindIpsFromNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyVpnGatewayAttribute(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyVpnGatewayAttributeRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyVpnGatewayAttribute(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteSubnet(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteSubnetRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteSubnet(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeNatSubnets(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeNatSubnetsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeNatSubnets(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRejectVpcPeerConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RejectVpcPeerConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.RejectVpcPeerConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSubnets(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeSubnetsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeSubnets(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateCustomerGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateCustomerGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateCustomerGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifySubnetDHCPRelay(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifySubnetDHCPRelayRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifySubnetDHCPRelay(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpcView(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpcViewRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpcView(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteVirtualIp(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteVirtualIpRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteVirtualIp(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSubnetByDevice(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeSubnetByDeviceRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeSubnetByDevice(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTaskStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeTaskStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeTaskStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAcceptVpcPeerConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AcceptVpcPeerConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.AcceptVpcPeerConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpgradeNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpgradeNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.UpgradeNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteCustomerGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteCustomerGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteCustomerGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateRoutePolicies(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateRoutePoliciesRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateRoutePolicies(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyRouteTable(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyRouteTableRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyRouteTable(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnbindSubnetsFromNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnbindSubnetsFromNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnbindSubnetsFromNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetVpnConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResetVpnConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.ResetVpnConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpnGateways(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpnGatewaysRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpnGateways(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifySubnetAttribute(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifySubnetAttributeRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifySubnetAttribute(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateDockerSubnetWithVlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateDockerSubnetWithVlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateDockerSubnetWithVlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeNatGateways(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeNatGatewaysRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeNatGateways(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindSubnetsToNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindSubnetsToNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindSubnetsToNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpcQuota(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpcQuotaRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpcQuota(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateHostedInterface(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateHostedInterfaceRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateHostedInterface(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSubnetByHostedDevice(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeSubnetByHostedDeviceRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeSubnetByHostedDevice(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpcPeerConnections(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpcPeerConnectionsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpcPeerConnections(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpcResource(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpcResourceRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpcResource(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteVpcPeerConnection(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteVpcPeerConnectionRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteVpcPeerConnection(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteInterfaces(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteInterfacesRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteInterfaces(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyVpcAttribute(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyVpcAttributeRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyVpcAttribute(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindIpsToNatGateway(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindIpsToNatGatewayRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindIpsToNatGateway(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpnConnections(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpnConnectionsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpnConnections(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateSubnet(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmvpcClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateSubnetRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateSubnet(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20180625": bmvpc_client_v20180625,
}
MODELS_MAP = {
"v20180625": models_v20180625,
}
ACTION_MAP = {
"DownloadCustomerGatewayConfiguration": doDownloadCustomerGatewayConfiguration,
"DescribeCustomerGateways": doDescribeCustomerGateways,
"CreateVpcPeerConnection": doCreateVpcPeerConnection,
"AsyncRegisterIps": doAsyncRegisterIps,
"DescribeRouteTables": doDescribeRouteTables,
"DeleteHostedInterfaces": doDeleteHostedInterfaces,
"UnbindEipsFromNatGateway": doUnbindEipsFromNatGateway,
"BindEipsToNatGateway": doBindEipsToNatGateway,
"ModifyVpcPeerConnection": doModifyVpcPeerConnection,
"CreateVpc": doCreateVpc,
"CreateNatGateway": doCreateNatGateway,
"DeleteVpnGateway": doDeleteVpnGateway,
"DescribeRoutePolicies": doDescribeRoutePolicies,
"DeleteVpc": doDeleteVpc,
"DeleteHostedInterface": doDeleteHostedInterface,
"DeregisterIps": doDeregisterIps,
"ModifyRoutePolicy": doModifyRoutePolicy,
"CreateInterfaces": doCreateInterfaces,
"ModifyCustomerGatewayAttribute": doModifyCustomerGatewayAttribute,
"DeleteRoutePolicy": doDeleteRoutePolicy,
"CreateVirtualSubnetWithVlan": doCreateVirtualSubnetWithVlan,
"ModifyVpnConnectionAttribute": doModifyVpnConnectionAttribute,
"DeleteVpnConnection": doDeleteVpnConnection,
"DescribeSubnetAvailableIps": doDescribeSubnetAvailableIps,
"DescribeVpcs": doDescribeVpcs,
"UnbindIpsFromNatGateway": doUnbindIpsFromNatGateway,
"ModifyVpnGatewayAttribute": doModifyVpnGatewayAttribute,
"DeleteSubnet": doDeleteSubnet,
"DescribeNatSubnets": doDescribeNatSubnets,
"RejectVpcPeerConnection": doRejectVpcPeerConnection,
"DescribeSubnets": doDescribeSubnets,
"CreateCustomerGateway": doCreateCustomerGateway,
"DeleteNatGateway": doDeleteNatGateway,
"ModifySubnetDHCPRelay": doModifySubnetDHCPRelay,
"DescribeVpcView": doDescribeVpcView,
"DeleteVirtualIp": doDeleteVirtualIp,
"DescribeSubnetByDevice": doDescribeSubnetByDevice,
"DescribeTaskStatus": doDescribeTaskStatus,
"AcceptVpcPeerConnection": doAcceptVpcPeerConnection,
"UpgradeNatGateway": doUpgradeNatGateway,
"DeleteCustomerGateway": doDeleteCustomerGateway,
"CreateRoutePolicies": doCreateRoutePolicies,
"ModifyRouteTable": doModifyRouteTable,
"UnbindSubnetsFromNatGateway": doUnbindSubnetsFromNatGateway,
"ResetVpnConnection": doResetVpnConnection,
"DescribeVpnGateways": doDescribeVpnGateways,
"ModifySubnetAttribute": doModifySubnetAttribute,
"CreateDockerSubnetWithVlan": doCreateDockerSubnetWithVlan,
"DescribeNatGateways": doDescribeNatGateways,
"BindSubnetsToNatGateway": doBindSubnetsToNatGateway,
"DescribeVpcQuota": doDescribeVpcQuota,
"CreateHostedInterface": doCreateHostedInterface,
"DescribeSubnetByHostedDevice": doDescribeSubnetByHostedDevice,
"DescribeVpcPeerConnections": doDescribeVpcPeerConnections,
"DescribeVpcResource": doDescribeVpcResource,
"DeleteVpcPeerConnection": doDeleteVpcPeerConnection,
"DeleteInterfaces": doDeleteInterfaces,
"ModifyVpcAttribute": doModifyVpcAttribute,
"BindIpsToNatGateway": doBindIpsToNatGateway,
"DescribeVpnConnections": doDescribeVpnConnections,
"CreateSubnet": doCreateSubnet,
}
AVAILABLE_VERSION_LIST = [
"v20180625",
]
def action_caller():
return ACTION_MAP
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = "default"
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
if OptionsDefine.Token not in cred:
cred[OptionsDefine.Token] = None
if not is_exist_profile:
if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
if os.environ.get(OptionsDefine.ENV_REGION):
conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
for param in g_param.keys():
if g_param[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
if param in cred:
g_param[param] = cred[param]
else:
raise ConfigurationError("%s is invalid" % param)
elif param in [OptionsDefine.Region, OptionsDefine.Output]:
if param in conf:
g_param[param] = conf[param]
else:
raise ConfigurationError("%s is invalid" % param)
try:
if g_param[OptionsDefine.ServiceVersion]:
g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
else:
version = conf["bmvpc"][OptionsDefine.Version]
g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
if g_param[OptionsDefine.Endpoint] is None:
g_param[OptionsDefine.Endpoint] = conf["bmvpc"][OptionsDefine.Endpoint]
except Exception as err:
raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return g_param
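# Illustrative sketch only (not part of the generated module): the CLI front end dispatches
# by action name through ACTION_MAP; the argument dict and parsed_globals below are placeholders.
#   action = action_caller()["DescribeVpcs"]
#   action({"Offset": 0, "Limit": 20}, parsed_globals)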
| 43.926171
| 105
| 0.73064
|
cd30722f19309f0eded0703dfa7c8ca621630222
| 1,696
|
py
|
Python
|
sdks/python/apache_beam/utils/sharded_key.py
|
eyal0/beam
|
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
|
[
"Apache-2.0"
] | 1
|
2019-08-02T18:03:15.000Z
|
2019-08-02T18:03:15.000Z
|
sdks/python/apache_beam/utils/sharded_key.py
|
eyal0/beam
|
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
|
[
"Apache-2.0"
] | 80
|
2020-01-16T09:55:09.000Z
|
2020-10-03T13:43:07.000Z
|
sdks/python/apache_beam/utils/sharded_key.py
|
eyal0/beam
|
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
|
[
"Apache-2.0"
] | 1
|
2020-04-29T20:09:40.000Z
|
2020-04-29T20:09:40.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
class ShardedKey(object):
"""
A sharded key consisting of a user key and an opaque shard id represented by
bytes.
Attributes:
key: The user key.
shard_id: An opaque byte string that uniquely represents a shard of the key.
"""
def __init__(
self,
key,
shard_id, # type: bytes
):
# type: (...) -> None
assert shard_id is not None
self._key = key
self._shard_id = shard_id
@property
def key(self):
return self._key
def __repr__(self):
return '(%s, %s)' % (repr(self.key), self._shard_id)
def __eq__(self, other):
return (
type(self) == type(other) and self.key == other.key and
self._shard_id == other._shard_id)
def __hash__(self):
return hash((self.key, self._shard_id))
def __reduce__(self):
return ShardedKey, (self.key, self._shard_id)
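# A minimal usage sketch (not part of the Beam source): two ShardedKey objects compare
# equal only when both the user key and the opaque shard id match.
if __name__ == '__main__':
  k1 = ShardedKey('user-key', b'\x00\x01')
  k2 = ShardedKey('user-key', b'\x00\x01')
  assert k1 == k2 and hash(k1) == hash(k2)
  assert k1 != ShardedKey('user-key', b'\x00\x02')
  print(repr(k1))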
| 28.745763
| 80
| 0.70342
|
e36a3eea2b0aeb4c449df47f838f0748a74d99c4
| 7,399
|
py
|
Python
|
api/tests/v2/test_resource_requests.py
|
simpsonw/atmosphere
|
3a5203ef0b563de3a0e8c8c8715df88186532d7a
|
[
"BSD-3-Clause"
] | 197
|
2016-12-08T02:33:32.000Z
|
2022-03-23T14:27:47.000Z
|
api/tests/v2/test_resource_requests.py
|
simpsonw/atmosphere
|
3a5203ef0b563de3a0e8c8c8715df88186532d7a
|
[
"BSD-3-Clause"
] | 385
|
2017-01-03T22:51:46.000Z
|
2020-12-16T16:20:42.000Z
|
api/tests/v2/test_resource_requests.py
|
benlazarine/atmosphere
|
38fad8e4002e510e8b4294f2bb5bc75e8e1817fa
|
[
"BSD-3-Clause"
] | 50
|
2016-12-08T08:32:25.000Z
|
2021-12-10T00:21:39.000Z
|
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from rest_framework.test import APITestCase, force_authenticate
from api.tests.v2.base import APISanityTestCase
from api.v2.views import ResourceRequestViewSet, AdminResourceRequestViewSet
from core.models import ResourceRequest, StatusType, AtmosphereUser
class ResourceRequestSanityTests(APITestCase, APISanityTestCase):
url_route = 'api:v2:resourcerequest'
def setUp(self):
self.list_view = ResourceRequestViewSet.as_view({'get': 'list'})
self.user = AtmosphereUser.objects.create(username='user')
class UserResourceRequestTests(APITestCase):
def test_list_user_resource_requests(self):
"""
Users shouldn't see other users' resource requests
"""
# Create a request owned by a user
user_a = AtmosphereUser.objects.create(username='user_a')
create_request_for_user(user_a)
# Create a request owned by a user
user_b = AtmosphereUser.objects.create(username='user_b')
create_request_for_user(user_b)
# Fetch user_a's requests from api
url = reverse('api:v2:resourcerequest-list')
view = ResourceRequestViewSet.as_view({'get': 'list'})
factory = APIRequestFactory()
request = factory.get(url)
force_authenticate(request, user=user_a)
response = view(request)
# The resource requests returned were created by user_a
for resource_request in response.data["results"]:
self.assertEqual(
resource_request["created_by"]["username"], user_a.username
)
def test_user_not_allowed_to_approve(self):
"""
Users cannot approve their own requests
"""
update_status, _ = StatusType.objects.get_or_create(name='approved')
response = submit_patch_with_payload(
{
'status': {
'id': update_status.pk
}
}
)
self.assertEqual(response.status_code, 403)
def test_user_not_allowed_to_deny(self):
"""
Users cannot deny their own requests
"""
update_status, _ = StatusType.objects.get_or_create(name='denied')
response = submit_patch_with_payload(
{
'status': {
'id': update_status.pk
}
}
)
self.assertEqual(response.status_code, 403)
def test_user_closing_their_request(self):
"""
Users can close their own requests
"""
user = AtmosphereUser.objects.create(username='user')
resource_request = create_request_for_user(user)
update_status, _ = StatusType.objects.get_or_create(name='closed')
# Close their request
response = submit_patch_with_payload(
{
'status': {
'id': update_status.pk
}
},
user=user,
resource_request=resource_request
)
# Assert api success
self.assertEqual(response.status_code, 200)
# Assert that the resource request was actually closed
updated_resource_request = ResourceRequest.objects.get(
pk=resource_request.pk
)
self.assertEqual(
updated_resource_request.status.name, update_status.name
)
def test_user_cannot_update_admin_message(self):
"""
Users cannot update the admin message of their request
"""
user = AtmosphereUser.objects.create(username='user')
resource_request = create_request_for_user(user)
# Attempt to update the admin message
response = submit_patch_with_payload(
{
"admin_message": "idk why anyone would do this"
},
user=user,
resource_request=resource_request
)
# Assert that they don't have permission
self.assertEqual(response.status_code, 403)
# Assert that the resource request wasn't changed
updated_resource_request = ResourceRequest.objects.get(
pk=resource_request.pk
)
self.assertEqual(
updated_resource_request.admin_message,
resource_request.admin_message
)
def test_user_can_submit_resource_request(self):
"""
Users can submit a resource request
"""
user = AtmosphereUser.objects.create(username='user')
url = reverse('api:v2:resourcerequest-list')
view = ResourceRequestViewSet.as_view({'post': 'create'})
factory = APIRequestFactory()
request = factory.post(
url, {
'request':
"100000 AU",
'description':
'Make world better place',
'admin_url':
'https://local.atmo.cloud/application/admin/resource_requests/'
}
)
force_authenticate(request, user=user)
response = view(request)
# Response indicated creation
self.assertEqual(response.status_code, 201)
class AdminResourceRequestTests(APITestCase):
def test_staff_can_approve(self):
"""
Admins can approve users' requests
"""
user = AtmosphereUser.objects.create(username='user')
resource_request = create_request_for_user(user)
update_status, _ = StatusType.objects.get_or_create(name='approved')
staff_user = AtmosphereUser.objects.create(
username='staff_user', is_staff=True
)
# Approve user request
response = submit_patch_with_payload(
{
'status': {
'id': update_status.pk
}
},
user=staff_user,
resource_request=resource_request,
view_path='api:v2:admin:resourcerequest-detail',
viewset=AdminResourceRequestViewSet
)
# Assert api success
self.assertEqual(response.status_code, 200)
# Assert that the resource request was actually approved
updated_resource_request = ResourceRequest.objects.get(
pk=resource_request.pk
)
self.assertEqual(
updated_resource_request.status.name, update_status.name
)
def create_request_for_user(user):
status, _ = StatusType.objects.get_or_create(name='pending')
return ResourceRequest.objects.create(
created_by=user, description="test", status=status, request="test"
)
def submit_patch_with_payload(
payload,
user=None,
resource_request=None,
view_path='api:v2:resourcerequest-detail',
viewset=ResourceRequestViewSet
):
"""
Submits a patch updating a user's resource request.
Returns the response.
"""
if not user:
user = AtmosphereUser.objects.create(username='user')
if not resource_request:
resource_request = create_request_for_user(user)
url = reverse(view_path, args=[resource_request.pk])
view = viewset.as_view({'patch': 'partial_update'})
request = APIRequestFactory().patch(url, payload, format='json')
force_authenticate(request, user=user)
response = view(request, pk=resource_request.pk)
return response
| 32.884444
| 83
| 0.626166
|
fc898af968541078c3a5bc1b7af5562a4f38b00e
| 1,930
|
py
|
Python
|
class10/ex1_serial.py
|
sbyount/pyaut3
|
2fcf19851487db49d76d5b6996ee0f9194d90816
|
[
"Apache-2.0"
] | 1
|
2019-04-17T02:49:58.000Z
|
2019-04-17T02:49:58.000Z
|
class10/ex1_serial.py
|
sbyount/pyaut3
|
2fcf19851487db49d76d5b6996ee0f9194d90816
|
[
"Apache-2.0"
] | null | null | null |
class10/ex1_serial.py
|
sbyount/pyaut3
|
2fcf19851487db49d76d5b6996ee0f9194d90816
|
[
"Apache-2.0"
] | null | null | null |
from my_devices import network_devices as devices
from netmiko import ConnectHandler
from datetime import datetime
'''
1a. As you have done in previous classes, create a Python file named "my_devices.py".
In this file, define the connection information for: 'cisco3', 'arista1', 'arista2',
and 'srx2'. This file should contain all the necessary information to create a
Netmiko connection. Use getpass() for the password handling. Use a global_delay_factor
of 4 for both the arista1 device and the arista2 device. This Python module should
be used to store the connection information for all of the exercises in this lesson.
1b. Create a Python script that executes "show version" on each of the network
devices defined in my_devices.py. This script should execute serially i.e. one
SSH connection after the other. Record the total execution time for the script.
Print the "show version" output and the total execution time to standard output.
As part of this exercise, you should create a function that both establishes a
Netmiko connection and that executes a single show command that you pass in as
argument. This function's arguments should be the Netmiko device dictionary and
the "show-command" argument. The function should return the result from the show
command.
'''
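# Illustrative sketch of what my_devices.py (exercise 1a) might contain; hostnames,
# usernames, and device types are placeholders, and the password is collected once
# with getpass():
#
#   from getpass import getpass
#   password = getpass()
#   cisco3 = {"device_type": "cisco_ios", "host": "cisco3.example.com",
#             "username": "student", "password": password}
#   arista1 = {"device_type": "arista_eos", "host": "arista1.example.com",
#              "username": "student", "password": password,
#              "global_delay_factor": 4}
#   network_devices = [cisco3, arista1]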
def ssh_command(device, command):
"""Establish an SSH connection. Execute command, return results."""
device = ConnectHandler(**device)
output = device.send_command(command)
device.disconnect()
return output
def main():
'''1b. Iterate through the list of devices and send the "show version" command.'''
start_time = datetime.now()
for device in devices:
output = ssh_command(device, 'show version')
print('*' * 20 + device['host'] + '*' * 20)
print(output)
print('#' * 60)
end_time = datetime.now()
print(f'Total time: {end_time - start_time}')
if __name__ == "__main__":
main()
| 42.888889
| 86
| 0.744041
|
e73e06c224f5e7da18c5349ee4d31dcd79af0c82
| 145
|
py
|
Python
|
lang/phtml/phtml.py
|
bcooper94/knausj_talon
|
067c0f959d8fbf927cceb7e3842a25eb82429fdb
|
[
"Unlicense"
] | null | null | null |
lang/phtml/phtml.py
|
bcooper94/knausj_talon
|
067c0f959d8fbf927cceb7e3842a25eb82429fdb
|
[
"Unlicense"
] | null | null | null |
lang/phtml/phtml.py
|
bcooper94/knausj_talon
|
067c0f959d8fbf927cceb7e3842a25eb82429fdb
|
[
"Unlicense"
] | null | null | null |
from talon import Context, Module
mod = Module()
ctx = Context()
ctx.matches = r"""
mode: user.phtml
mode: command
and code.language: phtml
"""
| 14.5
| 33
| 0.703448
|
96a9544f69bfc35ac0cfd9d311119824ddbc067e
| 428
|
py
|
Python
|
python3/june/day_18_H-Index II.py
|
kashyapvinay/leetcode-challenge
|
750b0056cb547dc5266d142a9a5048ebd50d8ae3
|
[
"MIT"
] | 1
|
2020-06-01T11:35:46.000Z
|
2020-06-01T11:35:46.000Z
|
python3/june/day_18_H-Index II.py
|
kashyapvinay/leetcode-challenge
|
750b0056cb547dc5266d142a9a5048ebd50d8ae3
|
[
"MIT"
] | null | null | null |
python3/june/day_18_H-Index II.py
|
kashyapvinay/leetcode-challenge
|
750b0056cb547dc5266d142a9a5048ebd50d8ae3
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def hIndex(self, citations: List[int]) -> int:
N, left, right = len(citations), 0, len(citations)-1
while left <= right:
mid = left + (right - left)//2
if citations[mid] == N - mid:
return citations[mid]
elif citations[mid] > N - mid:
right = mid - 1
else:
left = mid + 1
return N - left
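# A quick usage sketch (not part of the LeetCode harness); citations must be sorted ascending.
if __name__ == "__main__":
    print(Solution().hIndex([0, 1, 3, 5, 6]))  # expected output: 3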
| 32.923077
| 60
| 0.46729
|
d8a9fe9c1decb4301979951f8e67e1e4dd06fff5
| 1,098
|
py
|
Python
|
wetterdienst/dwd/network.py
|
kmuehlbauer/wetterdienst
|
85e72ccdbd00f0e8285e1ba24800dfafb81ccd63
|
[
"MIT"
] | 1
|
2021-01-23T22:52:52.000Z
|
2021-01-23T22:52:52.000Z
|
wetterdienst/dwd/network.py
|
kmuehlbauer/wetterdienst
|
85e72ccdbd00f0e8285e1ba24800dfafb81ccd63
|
[
"MIT"
] | null | null | null |
wetterdienst/dwd/network.py
|
kmuehlbauer/wetterdienst
|
85e72ccdbd00f0e8285e1ba24800dfafb81ccd63
|
[
"MIT"
] | null | null | null |
import logging
from functools import lru_cache
from io import BytesIO
import requests
from requests.adapters import HTTPAdapter
from wetterdienst.dwd.metadata.constants import DWD_SERVER
logger = logging.getLogger(__name__)
def download_file_from_dwd(url: str) -> BytesIO:
"""
A function used to download a specified file from the server.
:param url: The URL of the file on the DWD server.
:return: Bytes of the file.
"""
dwd_session = create_dwd_session()
logger.info(f"Downloading resource {url}")
r = dwd_session.get(url)
r.raise_for_status()
return BytesIO(r.content)
MAX_RETRIES = 3
@lru_cache(maxsize=None)
def create_dwd_session() -> requests.Session:
"""
Function used to create a global session that is used for listing/downloading data
from the DWD server.
Returns:
requests.Session object that then can be used for requests to the server
"""
dwd_session = requests.Session()
dwd_session.mount(DWD_SERVER, HTTPAdapter(max_retries=MAX_RETRIES))
return dwd_session
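# Usage sketch (illustrative only; the path below is a placeholder, not a real DWD resource):
#   payload = download_file_from_dwd(f"{DWD_SERVER}/climate_environment/some_file.zip")
#   data = payload.read()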
| 22.875
| 86
| 0.715847
|
848c0284779ad8891563d65241273de29e50769f
| 3,143
|
py
|
Python
|
python/example_code/lookoutvision/test/test_projects.py
|
iconara/aws-doc-sdk-examples
|
52706b31b4fce8fb89468e56743edf5369e69628
|
[
"Apache-2.0"
] | 2
|
2022-01-17T13:03:22.000Z
|
2022-02-01T22:31:08.000Z
|
python/example_code/lookoutvision/test/test_projects.py
|
iconara/aws-doc-sdk-examples
|
52706b31b4fce8fb89468e56743edf5369e69628
|
[
"Apache-2.0"
] | 1
|
2020-03-18T17:00:15.000Z
|
2020-03-18T17:04:05.000Z
|
python/example_code/lookoutvision/test/test_projects.py
|
iconara/aws-doc-sdk-examples
|
52706b31b4fce8fb89468e56743edf5369e69628
|
[
"Apache-2.0"
] | 27
|
2020-04-16T22:52:53.000Z
|
2021-09-30T22:55:58.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for projects.py.
"""
import datetime
import boto3
from botocore.exceptions import ClientError
import pytest
from models import Models
from projects import Projects
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_create_project(make_stubber, error_code):
lookoutvision_client = boto3.client('lookoutvision')
lookoutvision_stubber = make_stubber(lookoutvision_client)
project_name = 'test-project_name'
project_arn = 'test-arn'
lookoutvision_stubber.stub_create_project(
project_name, project_arn, error_code=error_code)
if error_code is None:
got_project_arn = Projects.create_project(lookoutvision_client, project_name)
assert got_project_arn == project_arn
else:
with pytest.raises(ClientError) as exc_info:
Projects.create_project(lookoutvision_client, project_name)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_delete_project(make_stubber, error_code):
lookoutvision_client = boto3.client('lookoutvision')
lookoutvision_stubber = make_stubber(lookoutvision_client)
project_name = 'test-project_name'
project_arn = 'test-arn'
lookoutvision_stubber.stub_delete_project(
project_name, project_arn, error_code=error_code)
if error_code is None:
Projects.delete_project(lookoutvision_client, project_name)
else:
with pytest.raises(ClientError) as exc_info:
Projects.delete_project(lookoutvision_client, project_name)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_list_projects'),
('TestException', 'stub_describe_project'),
('TestException', 'stub_list_models'),
])
def test_list_projects(make_stubber, stub_runner, monkeypatch, error_code, stop_on_method):
lookoutvision_client = boto3.client('lookoutvision')
lookoutvision_stubber = make_stubber(lookoutvision_client)
project_name = 'test-project'
project_arn = 'test-arn'
created = datetime.datetime.now()
model_version = 'test-model'
dataset = {'DatasetType': 'testing', 'StatusMessage': 'nicely tested'}
monkeypatch.setattr(Models, 'describe_model', lambda x, y, z: None)
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
lookoutvision_stubber.stub_list_projects, [project_name],
[{'arn': project_arn, 'created': created}])
runner.add(lookoutvision_stubber.stub_describe_project, project_name, [dataset])
runner.add(lookoutvision_stubber.stub_list_models, project_name, [model_version])
if error_code is None:
Projects.list_projects(lookoutvision_client)
else:
with pytest.raises(ClientError) as exc_info:
Projects.list_projects(lookoutvision_client)
assert exc_info.value.response['Error']['Code'] == error_code
| 37.416667
| 91
| 0.735921
|
3a78b3d4be72dcf3e4363df9f8f6fb976dbb0d82
| 1,489
|
py
|
Python
|
FPIDjango/settings_private.py
|
snelzing/Food-Pantry-Inventory
|
ff8803cc53ceeb012fb97c60373734f03f03bf6a
|
[
"MIT"
] | 1
|
2019-02-21T05:22:06.000Z
|
2019-02-21T05:22:06.000Z
|
FPIDjango/settings_private.py
|
snelzing/Food-Pantry-Inventory
|
ff8803cc53ceeb012fb97c60373734f03f03bf6a
|
[
"MIT"
] | null | null | null |
FPIDjango/settings_private.py
|
snelzing/Food-Pantry-Inventory
|
ff8803cc53ceeb012fb97c60373734f03f03bf6a
|
[
"MIT"
] | null | null | null |
"""
settings_private.py - Shadow or pseudo-private file.
This file has dummy settings in it. The purpose is to show the format of
your real settings_private file in the private subdirectory.
The files at this level are dummy files that are safe to upload to GitHub.
The equivalent files in the private subdirectory are ignored by git so it
is safe to put your sensitive (and really private) parameters in those files.
When you run Django on your system for real, change the environment
variable for DJANGO_SETTINGS_MODULE from FPIDjango.settings to
FPIDjango.private.settings.
"""
__author__ = '(Multiple)'
__project__ = "Food-Pantry-Inventory"
__creation_date__ = "04/01/2019"
# The name of the engine Django needs to use to access the database
DB_ENGINE = 'django.db.backends.postgresql'
# The name of your database
DB_NAME = 'WARM'
# The user ID to be used by Django to access the database.
DB_USER = 'postgres'
# The password for this user
DB_PSWD = 'PSWD'
# The host for the database server
DB_HOST = 'localhost' # can also be '127.0.0.1'
# The port used by the database server
DB_PORT = '5432'
# Specify any additional private parameters here.
MY_SECRET_KEY = '<specify your own random string of 50 characters>'
# This snippet of code can be used to generate a random secret key
# from string import printable
# from datetime import datetime
# from random import choice, seed
# seed(datetime.now().microsecond)
# "".join([choice(printable) for i in range(50)])
# EOF
| 29.78
| 77
| 0.762257
|
5fa0623f778c79716165d5365488b0de48edbc46
| 45
|
py
|
Python
|
cheinsteinpy/__init__.py
|
DouglasTaylorSupportGroup/cheinstein.py
|
8be32036ba14aa79f1ca66c2efcf8b71aacbd147
|
[
"MIT"
] | 3
|
2022-02-04T22:25:59.000Z
|
2022-02-15T04:56:34.000Z
|
cheinsteinpy/__init__.py
|
DouglasTaylorSupportGroup/cheinstein.py
|
8be32036ba14aa79f1ca66c2efcf8b71aacbd147
|
[
"MIT"
] | null | null | null |
cheinsteinpy/__init__.py
|
DouglasTaylorSupportGroup/cheinstein.py
|
8be32036ba14aa79f1ca66c2efcf8b71aacbd147
|
[
"MIT"
] | 3
|
2022-01-24T19:52:17.000Z
|
2022-02-23T09:12:46.000Z
|
from .api import *
from .requestPage import *
| 22.5
| 26
| 0.755556
|
64aa888452657490b7e8b49f685c17a60c75b938
| 7,165
|
py
|
Python
|
sds/utils/general.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 12
|
2019-09-21T13:52:09.000Z
|
2022-02-14T06:48:46.000Z
|
sds/utils/general.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 1
|
2020-01-22T12:34:52.000Z
|
2020-01-26T21:14:11.000Z
|
sds/utils/general.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 5
|
2019-09-18T15:11:26.000Z
|
2021-12-10T14:04:53.000Z
|
import numpy as np
import numpy.random as npr
from scipy.linalg import block_diag
from scipy.optimize import linear_sum_assignment
from operator import add, sub
from functools import lru_cache
from functools import wraps
from functools import reduce
from itertools import tee
from itertools import islice
import random
def train_test_split(obs, act, nb_traj_splits=7, seed=0,
split_trajs=False, begin=50, horizon=150, nb_time_splits=3):
from sklearn.model_selection import KFold, ShuffleSplit
train_obs, train_act, test_obs, test_act = [], [], [], []
list_idx = np.linspace(0, len(obs) - 1, len(obs), dtype=int)
# cv = KFold(nb_traj_splits, shuffle=True, random_state=seed)
cv = ShuffleSplit(nb_traj_splits, test_size=0.2, random_state=seed)
for train_list_idx, test_list_idx in cv.split(list_idx):
_train_obs = [obs[i] for i in train_list_idx]
_train_act = [act[i] for i in train_list_idx]
if split_trajs:
_train_obs_splits, _train_act_splits = [], []
for _obs, _act in zip(_train_obs, _train_act):
length = _obs.shape[0]
points = np.linspace(0, length - horizon, nb_time_splits + 1, dtype=int)[1:]
for t in points:
_train_obs_splits.append(_obs[t: t + horizon])
_train_act_splits.append(_act[t: t + horizon])
_train_obs += _train_obs_splits
_train_act += _train_act_splits
train_obs.append(_train_obs)
train_act.append(_train_act)
test_obs.append([obs[i] for i in test_list_idx])
test_act.append([act[i] for i in test_list_idx])
return train_obs, train_act, test_obs, test_act
def batches(batchsize, datasize):
idx_all = random.sample(range(datasize), batchsize)
idx_iter = iter(idx_all)
yield from iter(lambda: list(islice(idx_iter, batchsize)), [])
def one_hot(K, z):
z = np.atleast_1d(z).astype(int)
assert np.all(z >= 0) and np.all(z < K)
shp = z.shape
N = z.size
zoh = np.zeros((N, K))
zoh[np.arange(N), np.arange(K)[np.ravel(z)]] = 1
zoh = np.reshape(zoh, shp + (K,))
return zoh
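# Illustrative example of the encoding produced above:
#   one_hot(3, np.array([0, 2, 1]))
#   -> array([[1., 0., 0.],
#             [0., 0., 1.],
#             [0., 1., 0.]])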
def np_cache(function):
@lru_cache()
def cached_wrapper(hashable_array, *args):
array = np.array(hashable_array)
return function(array, *args)
@wraps(function)
def wrapper(array, *args):
array_tuple = tuple(zip(*array.T.tolist()))
return cached_wrapper(array_tuple, *args)
# copy lru_cache attributes over too
wrapper.cache_info = cached_wrapper.cache_info
wrapper.cache_clear = cached_wrapper.cache_clear
return wrapper
def arstack(x, lag=1):
t = tee(x, lag)
for i in range(1, lag):
for j in range(0, i):
next(t[i], None)
xl = list(zip(*t))
if len(xl) == 0:
xr = np.zeros((0, lag * x.shape[-1]))
return xr
else:
xr = np.stack(xl)
return np.reshape(xr, (len(xr), -1))
def flatten_to_dim(X, d):
assert X.ndim >= d
assert d > 0
return np.reshape(X[None, ...], (-1,) + X.shape[-d:])
def find_state_overlap(z1, z2, K1=None, K2=None):
assert z1.dtype == int and z2.dtype == int
assert z1.shape == z2.shape
assert z1.min() >= 0 and z2.min() >= 0
K1 = z1.max() + 1 if K1 is None else K1
K2 = z2.max() + 1 if K2 is None else K2
overlap = np.zeros((K1, K2))
for k1 in range(K1):
for k2 in range(K2):
overlap[k1, k2] = np.sum((z1 == k1) & (z2 == k2))
return overlap
def find_permutation(z1, z2, K1=None, K2=None):
overlap = find_state_overlap(z1, z2, K1=K1, K2=K2)
K1, K2 = overlap.shape
tmp, perm = linear_sum_assignment(-overlap)
assert np.all(tmp == np.arange(K1)), "All indices should have been matched!"
# Pad permutation if K1 < K2
if K1 < K2:
unused = np.array(list(set(np.arange(K2)) - set(perm)))
perm = np.concatenate((perm, unused))
return perm
def random_rotation(n, theta=None):
if theta is None:
# Sample a random, slow rotation
theta = 0.5 * np.pi * npr.rand()
if n == 1:
return npr.rand() * np.eye(1)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
out = np.zeros((n, n))
out[:2, :2] = rot
q = np.linalg.qr(npr.randn(n, n))[0]
return q.dot(out).dot(q.T)
def linear_regression(Xs, ys, weights=None,
mu0=0., sigma0=1e32,
nu0=0, psi0=1.,
fit_intercept=True):
Xs = Xs if isinstance(Xs, (list, tuple)) else [Xs]
ys = ys if isinstance(ys, (list, tuple)) else [ys]
assert len(Xs) == len(ys)
D = Xs[0].shape[1]
P = ys[0].shape[1]
assert all([X.shape[1] == D for X in Xs])
assert all([y.shape[1] == P for y in ys])
assert all([X.shape[0] == y.shape[0] for X, y in zip(Xs, ys)])
nu0 = np.maximum(nu0, P + 1)
mu0 = mu0 * np.ones((P, D))
sigma0 = sigma0 * np.eye(D)
# Normalize the weights argument to a list, defaulting to uniform weights
if weights is not None:
weights = weights if isinstance(weights, (list, tuple)) else [weights]
else:
weights = [np.ones(X.shape[0]) for X in Xs]
# Add weak prior on intercept
if fit_intercept:
mu0 = np.column_stack((mu0, np.zeros(P)))
sigma0 = block_diag(sigma0, 1e32 * np.eye(1))
# Compute the posterior
J = np.linalg.inv(sigma0)
h = np.dot(J, mu0.T)
for X, y, weight in zip(Xs, ys, weights):
X = np.column_stack((X, np.ones(X.shape[0]))) if fit_intercept else X
J += np.dot(X.T * weight, X)
h += np.dot(X.T * weight, y)
# Solve for the MAP estimate
# W = np.dot(h.T, np.linalg.pinv(J)) # method 1
W = np.linalg.lstsq(J, h, rcond=None)[0].T # method 2
if fit_intercept:
W, b = W[:, :-1], W[:, -1]
else:
b = 0
# Compute the residual and the posterior variance
nu = nu0
Psi = psi0 * np.eye(P)
for X, y, weight in zip(Xs, ys, weights):
yhat = np.dot(X, W.T) + b
resid = y - yhat
nu += np.sum(weight)
Psi += np.sum(weight[:, None, None] * resid[:, :, None] * resid[:, None, :], axis=0)
# Psi += np.einsum('t,ti,tj->ij', weight, resid, resid)
# Get MAP estimate of posterior covariance
Sigma = Psi / (nu + P + 1)
if fit_intercept:
return W, b, Sigma
else:
return W, Sigma
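# Minimal usage sketch (illustrative shapes): fit y ~ X W.T + b under the weak prior above.
#   X = npr.randn(100, 3)
#   W_true, b_true = npr.randn(2, 3), npr.randn(2)
#   y = X @ W_true.T + b_true + 0.01 * npr.randn(100, 2)
#   W, b, Sigma = linear_regression(X, y)                    # with intercept (default)
#   W, Sigma = linear_regression(X, y, fit_intercept=False)  # without intercept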
def islist(*args):
return all(isinstance(_arg, list) for _arg in args)
class Statistics(tuple):
def __new__(cls, x):
return tuple.__new__(Statistics, x)
def __add__(self, y):
gsum = lambda x, y: reduce(lambda a, b: list(map(add, a, b)) if islist(x, y) else a + b, [x, y])
return Statistics(tuple(map(gsum, self, y)))
def __sub__(self, y):
gsub = lambda x, y: reduce(lambda a, b: list(map(sub, a, b)) if islist(x, y) else a - b, [x, y])
return Statistics(tuple(map(gsub, self, y)))
def __mul__(self, a):
return Statistics(a * e for e in self)
def __rmul__(self, a):
return Statistics(a * e for e in self)
| 29.485597
| 104
| 0.590649
|
53c85332dff434338fdfcda3403149e0ce64fe9f
| 1,990
|
py
|
Python
|
python/alibiexplainer/alibiexplainer/anchor_images.py
|
ittus/kserve
|
922a9b7e8a9a86b5ae65faf4ce863927873fd456
|
[
"Apache-2.0"
] | 1,146
|
2019-03-27T21:14:34.000Z
|
2021-09-22T08:36:46.000Z
|
python/alibiexplainer/alibiexplainer/anchor_images.py
|
ittus/kserve
|
922a9b7e8a9a86b5ae65faf4ce863927873fd456
|
[
"Apache-2.0"
] | 1,803
|
2019-03-27T22:16:02.000Z
|
2021-09-22T15:27:44.000Z
|
python/alibiexplainer/alibiexplainer/anchor_images.py
|
ittus/kserve
|
922a9b7e8a9a86b5ae65faf4ce863927873fd456
|
[
"Apache-2.0"
] | 573
|
2019-03-27T21:14:58.000Z
|
2021-09-20T21:15:52.000Z
|
# Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kserve
import logging
import numpy as np
import alibi
from alibi.api.interfaces import Explanation
from alibi.utils.wrappers import ArgmaxTransformer
from alibiexplainer.explainer_wrapper import ExplainerWrapper
from typing import Callable, List, Optional
logging.basicConfig(level=kserve.constants.KSERVE_LOGLEVEL)
class AnchorImages(ExplainerWrapper):
def __init__(
self,
predict_fn: Callable,
explainer: Optional[alibi.explainers.AnchorImage],
**kwargs
):
if explainer is None:
raise Exception("Anchor images requires a built explainer")
self.predict_fn = predict_fn
self.anchors_image = explainer
self.kwargs = kwargs
def explain(self, inputs: List) -> Explanation:
arr = np.array(inputs)
# check if predictor returns predicted class or prediction probabilities for each class
# if needed adjust predictor so it returns the predicted class
if np.argmax(self.predict_fn(arr).shape) == 0:
self.anchors_image.predictor = self.predict_fn
else:
self.anchors_image.predictor = ArgmaxTransformer(self.predict_fn)
logging.info("Calling explain on image of shape %s", (arr.shape,))
logging.info("anchor image call with %s", self.kwargs)
anchor_exp = self.anchors_image.explain(arr[0], **self.kwargs)
return anchor_exp
| 39.019608
| 95
| 0.721608
|
dcf9a802db8b6d261e5506b9df772636bb2a516f
| 347
|
py
|
Python
|
posthog/migrations/0157_plugin_metrics.py
|
brave-care/posthog
|
8edd14a16ad936fb241dcf856925e9f2ea87cba4
|
[
"MIT"
] | 7,409
|
2020-02-09T23:18:10.000Z
|
2022-03-31T22:36:25.000Z
|
posthog/migrations/0157_plugin_metrics.py
|
brave-care/posthog
|
8edd14a16ad936fb241dcf856925e9f2ea87cba4
|
[
"MIT"
] | 5,709
|
2020-02-09T23:26:13.000Z
|
2022-03-31T20:20:01.000Z
|
posthog/migrations/0157_plugin_metrics.py
|
brave-care/posthog
|
8edd14a16ad936fb241dcf856925e9f2ea87cba4
|
[
"MIT"
] | 647
|
2020-02-13T17:50:55.000Z
|
2022-03-31T11:24:19.000Z
|
# Generated by Django 3.1.8 on 2021-06-21 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0156_insight_short_id"),
]
operations = [
migrations.AddField(model_name="plugin", name="metrics", field=models.JSONField(default=dict, null=True),),
]
| 23.133333
| 115
| 0.677233
|
6282bb24bb6a32d0f63c7cbaa484d220412a1349
| 4,999
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/operations/_subscription_client_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/operations/_subscription_client_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/operations/_subscription_client_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_check_resource_name_request(
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2016-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Resources/checkResourceName')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class SubscriptionClientOperationsMixin(object):
@distributed_trace
def check_resource_name(
self,
resource_name_definition: Optional["_models.ResourceName"] = None,
**kwargs: Any
) -> "_models.CheckResourceNameResult":
"""Checks resource name validity.
A resource name is valid if it is not a reserved word, does not contain a reserved word, and
does not start with a reserved word.
:param resource_name_definition: Resource object with values for resource name and resource
type.
:type resource_name_definition:
~azure.mgmt.resource.subscriptions.v2016_06_01.models.ResourceName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckResourceNameResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2016_06_01.models.CheckResourceNameResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckResourceNameResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if resource_name_definition is not None:
_json = self._serialize.body(resource_name_definition, 'ResourceName')
else:
_json = None
request = build_check_resource_name_request(
content_type=content_type,
json=_json,
template_url=self.check_resource_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckResourceNameResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_resource_name.metadata = {'url': '/providers/Microsoft.Resources/checkResourceName'} # type: ignore
| 40.97541
| 133
| 0.691938
|
b9a9eec5d940e74a62dda96614fadd43bfe77b69
| 6,282
|
py
|
Python
|
kalendr_root/urls.py
|
bewallyt/Kalendr
|
45b793faae67f923f8b12074d1e9560b913c0ca2
|
[
"MIT"
] | null | null | null |
kalendr_root/urls.py
|
bewallyt/Kalendr
|
45b793faae67f923f8b12074d1e9560b913c0ca2
|
[
"MIT"
] | null | null | null |
kalendr_root/urls.py
|
bewallyt/Kalendr
|
45b793faae67f923f8b12074d1e9560b913c0ca2
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url, include
from rest_framework_nested import routers
from authentication.views import AccountViewSet, LoginView, LogoutView
from posts.views import AccountPostsViewSet, PostViewSet, NotificationPostView, GetSharedPostView, PostUpdateView, \
AccountUpdatePudPostViewSet, AccountSavePudPostViewSet
from kalendr_root.views import IndexView
from groups.views import GroupViewSet, AccountGroupsViewSet, AccountFollowingViewSet, AccountFollowerGroupsViewSet, \
AccountNonFollowerGroupsViewSet, AccountSpecificGroupViewSet, AccountLatestGroupViewSet, \
AccountFollowingPersonViewSet, AccountFollowingGroupViewSet
from access.views import AccessViewSet, AccountAccessViewSet, NotificationResponseView,PartialUpdateView
from puds.views import AccountPudsViewSet, PudViewSet, AccountCompletePudViewSet
from freetime.views import FreeTimeViewSet
from signup.views import SignUpCreateAndListView, SignUpView
from schedule.views import ScheduleViewSet
from pre_signup.views import PrefSignUpCreatAndListView, RequesterSignUpView, ResolveSignupView
# Base router
router = routers.SimpleRouter()
router.register(r'accounts', AccountViewSet)
router.register(r'posts', PostViewSet)
router.register(r'groups', GroupViewSet)
# This initially raised a "base_name not specified" error; adding a serializer to
# PartialUpdateViewSet fixed the problem.
# This is for updating an AccessRule instance. Partial update.
router.register(r'access', AccessViewSet)
router.register(r'access_update', PartialUpdateView)
router.register(r'puds', PudViewSet)
router.register(r'notification_posts', NotificationPostView)
router.register(r'notification_response', NotificationResponseView)
router.register(r'post_update', PostUpdateView)
router.register(r'freetime', FreeTimeViewSet)
# Initially used APIView but couldn't get it to work (kept getting 405s). Switched to
# ModelViewSet and it worked at once.
router.register(r'signup', SignUpCreateAndListView)
# For creating Pref-based signup
router.register(r'signupPref', PrefSignUpCreatAndListView)
router.register(r'schedule', ScheduleViewSet)
# For returning the description of a specific signup post
signup_router = routers.NestedSimpleRouter(router, r'signup', lookup='post')
signup_router.register(r'get_description', SignUpCreateAndListView)
signup_router.register(r'request', SignUpView)
# Get the list of slots for a pref-based signup
signup_router.register(r'prefSlots', RequesterSignUpView)
signup_router.register(r'requestPref', RequesterSignUpView)
signup_router.register(r'get_suggestion', ResolveSignupView)
signup_router.register(r'resolve_schedule', ResolveSignupView)
# For other users to select signup slots
select_slot_router = routers.NestedSimpleRouter(signup_router, r'get_description', lookup='duration')
select_slot_router.register(r'request', SignUpView)
accounts_router = routers.NestedSimpleRouter(router, r'accounts', lookup='account')
# /api/v1/accounts/"user_id/name"/posts/
accounts_router.register(r'posts', AccountPostsViewSet)
# all the groups that I own
accounts_router.register(r'groups', AccountGroupsViewSet)
# all the follower groups that I own
accounts_router.register(r'myfollowergroups', AccountFollowerGroupsViewSet)
# all the non-follower groups that I own
accounts_router.register(r'mynonfollowergroups', AccountNonFollowerGroupsViewSet)
# all the non-follower groups that I'm a member of
accounts_router.register(r'mymembergroups', AccountFollowingGroupViewSet)
# all non-group accounts that I'm following
accounts_router.register(r'following', AccountFollowingPersonViewSet)
# all posts shared with me
accounts_router.register(r'access', AccountAccessViewSet)
accounts_router.register(r'puds', AccountPudsViewSet)
# all the groups that I'm a member of and do not own
accounts_router.register(r'following_all', AccountFollowingViewSet)
accounts_router.register(r'specific_group', AccountSpecificGroupViewSet)
accounts_router.register(r'latest_group', AccountLatestGroupViewSet)
# API endpoint for getting posts that a user shared with me
# /api/v1/accounts/"owner_name"/get_shared/
accounts_router.register(r'get_shared', GetSharedPostView)
week_router = routers.NestedSimpleRouter(accounts_router, r'posts', lookup='post')
# api/v1/accounts/"user_name/id"/posts/"post_id|week_num"/week/
week_router.register(r'week', AccountPostsViewSet)
week_router.register(r'savePostPud', AccountPostsViewSet)
week_router.register(r'updatePostPud', AccountUpdatePudPostViewSet)
save_router = routers.NestedSimpleRouter(week_router, r'savePostPud', lookup='week')
save_router.register(r'pudContent', AccountSavePudPostViewSet)
pud_save_router = routers.NestedSimpleRouter(accounts_router, r'puds', lookup='pud')
pud_save_router.register(r'savePud', AccountPudsViewSet)
pud_complete_router = routers.NestedSimpleRouter(pud_save_router, r'savePud', lookup='complete')
pud_complete_router.register(r'pudComplete', AccountCompletePudViewSet)
group_router = routers.NestedSimpleRouter(router, r'accounts', lookup='account')
group_router.register(r'groups', AccountGroupsViewSet)
# /api/v1/notification_response/"post"/response/
notification_router = routers.NestedSimpleRouter(router, r'notification_response', lookup='post')
notification_router.register(r'response', NotificationResponseView)
notification_response_router = routers.NestedSimpleRouter(notification_router, r'response', lookup='res')
notification_response_router.register(r'list', NotificationResponseView)
urlpatterns = patterns(
'',
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/', include(accounts_router.urls)),
url(r'^api/v1/', include(group_router.urls)),
url(r'^api/v1/', include(week_router.urls)),
url(r'^api/v1/', include(save_router.urls)),
url(r'^api/v1/', include(signup_router.urls)),
url(r'^api/v1/', include(select_slot_router.urls)),
url(r'^api/v1/', include(pud_save_router.urls)),
url(r'^api/v1/', include(pud_complete_router.urls)),
url(r'^api/v1/', include(notification_router.urls)),
url(r'^api/v1/', include(notification_response_router.urls)),
url(r'^api/v1/auth/login/$', LoginView.as_view(), name='login'),
url(r'^api/v1/auth/logout/$', LogoutView.as_view(), name='logout'),
url(r'^.*$', IndexView.as_view(), name='index'),
)
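# Illustrative URLs produced by the routers above (the IDs are hypothetical
# and shown only to clarify how the nested routers compose):
#   /api/v1/signup/                      -> SignUpCreateAndListView (list/create)
#   /api/v1/signup/3/request/            -> signup_router -> SignUpView
#   /api/v1/accounts/42/posts/           -> accounts_router -> AccountPostsViewSet
#   /api/v1/accounts/42/posts/7/week/    -> week_router -> AccountPostsViewSet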
| 48.697674
| 117
| 0.813594
|
8c18bf0a30545f7a6dff7302aff0261d940df4af
| 1,089
|
py
|
Python
|
spatialdata/geocoords/Converter.py
|
rwalkerlewis/spatialdata
|
515c8d9dec21d261d0d654b5c30e6759565268d2
|
[
"MIT"
] | 6
|
2017-09-19T11:05:33.000Z
|
2019-09-26T08:18:30.000Z
|
spatialdata/geocoords/Converter.py
|
rwalkerlewis/spatialdata
|
515c8d9dec21d261d0d654b5c30e6759565268d2
|
[
"MIT"
] | 38
|
2017-06-28T15:44:50.000Z
|
2022-02-17T04:04:02.000Z
|
spatialdata/geocoords/Converter.py
|
rwalkerlewis/spatialdata
|
515c8d9dec21d261d0d654b5c30e6759565268d2
|
[
"MIT"
] | 11
|
2015-11-09T06:29:35.000Z
|
2021-06-02T14:13:59.000Z
|
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# @file spatialdata/geocoords/Converter.py
#
# @brief Python function to convert b/t coordinate systems.
def convert(coords, csDest, csSrc):
"""
Convert coordinates from source coordinate system to destination
coordinate system. Transformation is done in place.
"""
if not csDest.getSpaceDim() == csSrc.getSpaceDim():
msg = "Spatial dimensions of source (%d) and destination (%d) " \
"coordinate systems must match." % (csSrc.getSpaceDim(), csDest.getSpaceDim())
raise ValueError(msg)
from . import geocoords
converter = geocoords.Converter()
converter.convert(coords, csDest, csSrc)
return
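# Illustrative usage sketch (not part of the original module; the coordinate
# system construction below is hypothetical and depends on the spatialdata API):
#
#   import numpy
#   from spatialdata.geocoords.CSGeo import CSGeo
#   from spatialdata.geocoords.Converter import convert
#
#   csSrc = CSGeo()    # source coordinate system (e.g. geographic lon/lat/elev)
#   csDest = CSGeo()   # destination coordinate system
#   coords = numpy.array([[-120.0, 37.0, 0.0]])
#   convert(coords, csDest, csSrc)   # 'coords' is transformed in place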
# End of file
| 28.657895
| 92
| 0.604224
|
740d3e1c31b2b942c6e2afc5a89491ded5a8e991
| 21,706
|
py
|
Python
|
scholarly/author_parser.py
|
ma-ji/scholarly
|
47bcbd099b63c6e9f1eab13b85f90c8a47c77deb
|
[
"Unlicense"
] | 1
|
2021-12-26T03:55:06.000Z
|
2021-12-26T03:55:06.000Z
|
scholarly/author_parser.py
|
ma-ji/scholarly
|
47bcbd099b63c6e9f1eab13b85f90c8a47c77deb
|
[
"Unlicense"
] | 1
|
2021-11-02T22:07:30.000Z
|
2021-11-02T22:07:30.000Z
|
scholarly/author_parser.py
|
ma-ji/scholarly
|
47bcbd099b63c6e9f1eab13b85f90c8a47c77deb
|
[
"Unlicense"
] | null | null | null |
from .publication_parser import PublicationParser
import re
from .data_types import Author, AuthorSource, PublicationSource, PublicAccess
from selenium.webdriver.common.by import By
import codecs
_CITATIONAUTHRE = r'user=([\w-]*)'
_HOST = 'https://scholar.google.com{0}'
_PAGESIZE = 100
_EMAILAUTHORRE = r'Verified email at '
_CITATIONAUTH = '/citations?hl=en&user={0}'
_COAUTH = ('https://scholar.google.com/citations?user={0}&hl=en'
'#d=gsc_md_cod&u=%2Fcitations%3Fview_op%3Dlist_colleagues'
'%26hl%3Den%26json%3D%26user%3D{0}%23t%3Dgsc_cod_lc')
_MANDATES = "/citations?hl=en&tzom=300&user={0}&view_op=list_mandates&pagesize={1}"
class AuthorParser:
"""Returns an object for a single author"""
def __init__(self, nav):
self.nav = nav
self._sections = ['basics',
'indices',
'counts',
'coauthors',
'publications',
'public_access']
def get_author(self, __data)->Author:
""" Fills the information for an author container
"""
author: Author = {'container_type': 'Author'}
author['filled'] = []
if isinstance(__data, str):
author['scholar_id'] = __data
author['source'] = AuthorSource.AUTHOR_PROFILE_PAGE
else:
author['source'] = AuthorSource.SEARCH_AUTHOR_SNIPPETS
author['scholar_id'] = re.findall(_CITATIONAUTHRE, __data('a')[0]['href'])[0]
pic = '/citations?view_op=medium_photo&user={}'.format(author['scholar_id'])
author['url_picture'] = _HOST.format(pic)
name_class = self._find_tag_class_name(__data, 'h3', 'name')
author['name'] = __data.find('h3', class_=name_class).text
aff_class = self._find_tag_class_name(__data, 'div', 'aff')
affiliation = __data.find('div', class_=aff_class)
if affiliation:
author['affiliation'] = affiliation.text
email_class = self._find_tag_class_name(__data, 'div', 'eml')
email = __data.find('div', class_=email_class)
if email:
author['email_domain'] = re.sub(_EMAILAUTHORRE, r'@', email.text)
int_class = self._find_tag_class_name(__data, 'a', 'one_int')
interests = __data.find_all('a', class_=int_class)
author['interests'] = [i.text.strip() for i in interests]
citedby_class = self._find_tag_class_name(__data, 'div', 'cby')
citedby = __data.find('div', class_=citedby_class)
if citedby and citedby.text != '':
author['citedby'] = int(citedby.text[9:])
return author
def _find_tag_class_name(self, __data, tag, text):
elements = __data.find_all(tag)
for element in elements:
if 'class' in element.attrs and text in element.attrs['class'][0]:
return element.attrs['class'][0]
def _fill_basics(self, soup, author):
author['name'] = soup.find('div', id='gsc_prf_in').text
if author['source'] == AuthorSource.AUTHOR_PROFILE_PAGE:
res = soup.find('img', id='gsc_prf_pup-img')
if res is not None:
if "avatar_scholar" not in res['src']:
author['url_picture'] = res['src']
elif author['source'] == AuthorSource.CO_AUTHORS_LIST:
picture = soup.find('img', id="gsc_prf_pup-img").get('src')
if "avatar_scholar" in picture:
picture = _HOST.format(picture)
author['url_picture'] = picture
affiliation = soup.find('div', class_='gsc_prf_il')
author['affiliation'] = affiliation.text
affiliation_link = affiliation.find('a')
if affiliation_link:
author['organization'] = int(affiliation_link.get('href').split("org=")[-1])
author['interests'] = [i.text.strip() for i in
soup.find_all('a', class_='gsc_prf_inta')]
email = soup.find('div', id="gsc_prf_ivh", class_="gsc_prf_il")
if author['source'] == AuthorSource.AUTHOR_PROFILE_PAGE:
if email.text != "No verified email":
author['email_domain'] = '@'+email.text.split(" ")[3]
homepage = email.find('a', class_="gsc_prf_ila")
if homepage:
author['homepage'] = homepage.get('href')
index = soup.find_all('td', class_='gsc_rsb_std')
if index:
author['citedby'] = int(index[0].text)
def _fill_indices(self, soup, author):
index = soup.find_all('td', class_='gsc_rsb_std')
if index:
author['citedby'] = int(index[0].text)
author['citedby5y'] = int(index[1].text)
author['hindex'] = int(index[2].text)
author['hindex5y'] = int(index[3].text)
author['i10index'] = int(index[4].text)
author['i10index5y'] = int(index[5].text)
else:
author['hindex'] = 0
author['hindex5y'] = 0
author['i10index'] = 0
author['i10index5y'] = 0
def _fill_counts(self, soup, author):
years = [int(y.text)
for y in soup.find_all('span', class_='gsc_g_t')]
cites = [int(c.text)
for c in soup.find_all('span', class_='gsc_g_al')]
author['cites_per_year'] = dict(zip(years, cites))
def _fill_public_access(self, soup, author):
available = soup.find('div', class_='gsc_rsb_m_a')
not_available = soup.find('div', class_='gsc_rsb_m_na')
n_available, n_not_available = 0, 0
if available:
n_available = int(available.text.split(" ")[0])
if not_available:
n_not_available = int(not_available.text.split(" ")[0])
author["public_access"] = PublicAccess(available=n_available,
not_available=n_not_available)
if 'publications' not in author['filled']:
return
# Make a dictionary mapping to the publications
publications = {pub['author_pub_id']:pub for pub in author['publications']}
soup = self.nav._get_soup(_MANDATES.format(author['scholar_id'], _PAGESIZE))
while True:
rows = soup.find_all('div', 'gsc_mnd_sec_na')
if rows:
for row in rows[0].find_all('a', 'gsc_mnd_art_rvw gs_nph gsc_mnd_link_font'):
author_pub_id = re.findall(r"citation_for_view=([\w:-]*)",
row['data-href'])[0]
publications[author_pub_id]["public_access"] = False
rows = soup.find_all('div', 'gsc_mnd_sec_avl')
if rows:
for row in rows[0].find_all('a', 'gsc_mnd_art_rvw gs_nph gsc_mnd_link_font'):
author_pub_id = re.findall(r"citation_for_view=([\w:-]*)",
row['data-href'])[0]
publications[author_pub_id]["public_access"] = True
next_button = soup.find(class_="gs_btnPR")
if next_button and "disabled" not in next_button.attrs:
url = next_button['onclick'][17:-1]
url = codecs.getdecoder("unicode_escape")(url)[0]
soup = self.nav._get_soup(url)
else:
break
def _fill_publications(self, soup, author, publication_limit: int = 0, sortby_str: str = ''):
author['publications'] = list()
pubstart = 0
url_citations = _CITATIONAUTH.format(author['scholar_id'])
url_citations += sortby_str
pub_parser = PublicationParser(self.nav)
flag = False
while True:
for row in soup.find_all('tr', class_='gsc_a_tr'):
new_pub = pub_parser.get_publication(row, PublicationSource.AUTHOR_PUBLICATION_ENTRY)
author['publications'].append(new_pub)
if (publication_limit) and (len(author['publications']) >= publication_limit):
flag = True
break
if 'disabled' not in soup.find('button', id='gsc_bpf_more').attrs and not flag:
pubstart += _PAGESIZE
url = '{0}&cstart={1}&pagesize={2}'.format(
url_citations, pubstart, _PAGESIZE)
soup = self.nav._get_soup(url)
else:
break
def _get_coauthors_short(self, soup):
"""Get the short list of coauthors from the profile page.
To be called by _fill_coauthors method.
"""
coauthors = soup.find_all('span', class_='gsc_rsb_a_desc')
coauthor_ids = [re.findall(_CITATIONAUTHRE,
coauth('a')[0].get('href'))[0]
for coauth in coauthors]
coauthor_names = [coauth.find(tabindex="-1").text
for coauth in coauthors]
coauthor_affils = [coauth.find(class_="gsc_rsb_a_ext").text
for coauth in coauthors]
return coauthor_ids, coauthor_names, coauthor_affils
def _get_coauthors_long(self, author):
"""Get the long (>20) list of coauthors.
Opens the dialog box to get the complete list of coauthors.
To be called by _fill_coauthors method.
"""
with self.nav.pm2._get_webdriver() as wd:
wd.get(_COAUTH.format(author['scholar_id']))
# Wait up to 30 seconds for the various elements to be available.
# The wait may be better set elsewhere.
wd.implicitly_wait(30)
coauthors = wd.find_elements(By.CLASS_NAME, 'gs_ai_pho')
coauthor_ids = [re.findall(_CITATIONAUTHRE,
coauth.get_attribute('href'))[0]
for coauth in coauthors]
coauthor_names = [name.text for name in
wd.find_elements(By.CLASS_NAME, 'gs_ai_name')]
coauthor_affils = [affil.text for affil in
wd.find_elements(By.CLASS_NAME, 'gs_ai_aff')]
return coauthor_ids, coauthor_names, coauthor_affils
def _fill_coauthors(self, soup, author):
# If "View All" is not found, scrape the page for coauthors
if not soup.find_all('button', id='gsc_coauth_opn'):
coauthor_info = self._get_coauthors_short(soup)
else:
# If "View All" is found, try opening the dialog box.
# If geckodriver is not installed, resort to a short list and warn.
try:
coauthor_info = self._get_coauthors_long(author)
except Exception as err:
coauthor_info = self._get_coauthors_short(soup)
self.nav.logger.warning(err)
self.nav.logger.warning("Fetching only the top 20 coauthors")
author['coauthors'] = []
for coauth_id, coauth_name, coauth_affil in zip(*coauthor_info):
new_coauthor = self.get_author(coauth_id)
new_coauthor['name'] = coauth_name
new_coauthor['affiliation'] = coauth_affil
new_coauthor['source'] = AuthorSource.CO_AUTHORS_LIST
author['coauthors'].append(new_coauthor)
def fill(self, author, sections: list = [], sortby="citedby", publication_limit: int = 0):
"""Populate the Author with information from their profile
The `sections` argument allows for finer granularity of the profile
information to be pulled.
:param sections: Sections of author profile to be filled, defaults to ``[]``.
* ``basics``: fills name, affiliation, and interests;
* ``citations``: fills h-index, i10-index, and 5-year analogues;
* ``counts``: fills number of citations per year;
* ``public_access``: fills number of articles with public access mandates;
* ``coauthors``: fills co-authors;
* ``publications``: fills publications;
* ``[]``: fills all of the above
:type sections: ['basics','citations','counts','public_access','coauthors','publications',[]] list, optional
:param sortby: Select the order of the citations on the author page, either 'citedby' or 'year'. Defaults to 'citedby'.
:type sortby: string
:param publication_limit: The maximum number of publications to fill for the author. Defaults to no limit.
:type publication_limit: int
:returns: The filled object if fill was successful, False otherwise.
:rtype: Author or bool
:Example::
.. testcode::
search_query = scholarly.search_author('Steven A Cholewiak')
author = next(search_query)
author = scholarly.fill(author, sections=['basics', 'citations', 'coauthors'])
scholarly.pprint(author)
:Output::
.. testoutput::
{'affiliation': 'Vision Scientist',
'citedby': 304,
'citedby5y': 226,
'coauthors': [{'affiliation': 'Kurt Koffka Professor of Experimental '
'Psychology, University of Giessen',
'filled': False,
'name': 'Roland Fleming',
'scholar_id': 'ruUKktgAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Professor of Vision Science, UC Berkeley',
'filled': False,
'name': 'Martin Banks',
'scholar_id': 'Smr99uEAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Durham University, Computer Science & Physics',
'filled': False,
'name': 'Gordon D. Love',
'scholar_id': '3xJXtlwAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Professor of ECE, Purdue University',
'filled': False,
'name': 'Hong Z Tan',
'scholar_id': 'OiVOAHMAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Deepmind',
'filled': False,
'name': 'Ari Weinstein',
'scholar_id': 'MnUboHYAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': "Brigham and Women's Hospital/Harvard Medical "
'School',
'filled': False,
'name': 'Chia-Chien Wu',
'scholar_id': 'dqokykoAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Professor of Psychology and Cognitive Science, '
'Rutgers University',
'filled': False,
'name': 'Jacob Feldman',
'scholar_id': 'KoJrMIAAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Research Scientist at Google Research, PhD '
'Student at UC Berkeley',
'filled': False,
'name': 'Pratul Srinivasan',
'scholar_id': 'aYyDsZ0AAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Formerly: Indiana University, Rutgers '
'University, University of Pennsylvania',
'filled': False,
'name': 'Peter C. Pantelis',
'scholar_id': 'FoVvIK0AAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Professor in Computer Science, University of '
'California, Berkeley',
'filled': False,
'name': 'Ren Ng',
'scholar_id': '6H0mhLUAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Yale University',
'filled': False,
'name': 'Steven W Zucker',
'scholar_id': 'rNTIQXYAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Brown University',
'filled': False,
'name': 'Ben Kunsberg',
'scholar_id': 'JPZWLKQAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Rutgers University, New Brunswick, NJ',
'filled': False,
'name': 'Manish Singh',
'scholar_id': '9XRvM88AAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Silicon Valley Professor of ECE, Purdue '
'University',
'filled': False,
'name': 'David S. Ebert',
'scholar_id': 'fD3JviYAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Clinical Director, Neurolens Inc.,',
'filled': False,
'name': 'Vivek Labhishetty',
'scholar_id': 'tD7OGTQAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'MIT',
'filled': False,
'name': 'Joshua B. Tenenbaum',
'scholar_id': 'rRJ9wTJMUB8C',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Chief Scientist, isee AI',
'filled': False,
'name': 'Chris Baker',
'scholar_id': 'bTdT7hAAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Professor of Psychology, Ewha Womans '
'University',
'filled': False,
'name': 'Sung-Ho Kim',
'scholar_id': 'KXQb7CAAAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Assistant Professor, Boston University',
'filled': False,
'name': 'Melissa M. Kibbe',
'scholar_id': 'NN4GKo8AAAAJ',
'source': 'CO_AUTHORS_LIST'},
{'affiliation': 'Nvidia Corporation',
'filled': False,
'name': 'Peter Shirley',
'scholar_id': 'nHx9IgYAAAAJ',
'source': 'CO_AUTHORS_LIST'}],
'email_domain': '@berkeley.edu',
'homepage': 'http://steven.cholewiak.com/',
'filled': False,
'hindex': 9,
'hindex5y': 9,
'i10index': 8,
'i10index5y': 7,
'interests': ['Depth Cues',
'3D Shape',
'Shape from Texture & Shading',
'Naive Physics',
'Haptics'],
'name': 'Steven A. Cholewiak, PhD',
'scholar_id': '4bahYMkAAAAJ',
'source': 'SEARCH_AUTHOR_SNIPPETS',
'url_picture': 'https://scholar.google.com/citations?view_op=medium_photo&user=4bahYMkAAAAJ'}
"""
try:
sections = [section.lower() for section in sections]
sections.sort(reverse=True) # Ensure 'publications' comes before 'public_access'
sortby_str = ''
if sortby == "year":
sortby_str = '&view_op=list_works&sortby=pubdate'
elif sortby != "citedby":
raise Exception("Please enter a valid sortby parameter. Options: 'year', 'citedby'")
url_citations = _CITATIONAUTH.format(author['scholar_id'])
url_citations += sortby_str
url = '{0}&pagesize={1}'.format(url_citations, _PAGESIZE)
soup = self.nav._get_soup(url)
if sections == []:
for i in self._sections:
if i not in author['filled']:
(getattr(self, f'_fill_{i}')(soup, author) if i != 'publications' else getattr(self, f'_fill_{i}')(soup, author, publication_limit, sortby_str))
author['filled'].append(i)
else:
for i in sections:
if i in self._sections and i not in author['filled']:
(getattr(self, f'_fill_{i}')(soup, author) if i != 'publications' else getattr(self, f'_fill_{i}')(soup, author, publication_limit, sortby_str))
author['filled'].append(i)
except Exception as e:
raise(e)
return author
def __repr__(self):
return self.__str__()
| 48.128603
| 168
| 0.510274
|
c1d49a192ae4a11ad53bba968954c7f907cdc62d
| 1,522
|
py
|
Python
|
fixture/session.py
|
margoam/python_training
|
00510c23df6e39cf2aa128671abc696c4b500a43
|
[
"Apache-2.0"
] | 1
|
2021-09-24T16:41:18.000Z
|
2021-09-24T16:41:18.000Z
|
fixture/session.py
|
margoam/python_training
|
00510c23df6e39cf2aa128671abc696c4b500a43
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
margoam/python_training
|
00510c23df6e39cf2aa128671abc696c4b500a43
|
[
"Apache-2.0"
] | null | null | null |
class SessionHelper: # Helper for working with the session
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page() # call the open_home_page method
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_xpath("//*/text()[normalize-space(.)='']/parent::*").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//input[@value='Login']").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("user")
def is_logged_in_as(self, username):
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div[@id='top']/form/b").text[1:-1]
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
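# Illustrative usage sketch (assumes a fixture object 'app' that exposes this
# helper as app.session; the credentials are hypothetical):
#
#   app.session.ensure_login(username="admin", password="secret")
#   assert app.session.is_logged_in_as("admin")
#   app.session.ensure_logout()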
| 32.382979
| 87
| 0.618265
|
8013a537c4b93f0f8e1d640d311e4d608dc13f4a
| 20,549
|
py
|
Python
|
flux_mito/model_366.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_366.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_366.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 42500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 90000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
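# Illustrative simulation sketch (not part of the exported model; assumes
# PySB's scipy integrator is available, and note that the rate parameters
# above are placeholders set to 1.0):
#
#   import numpy as np
#   from pysb.simulator import ScipyOdeSimulator
#
#   tspan = np.linspace(0, 20000, 101)
#   res = ScipyOdeSimulator(model, tspan=tspan).run()
#   print(res.observables['ParpC_obs'][-1])   # cleaved PARP at the final time point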
| 95.134259
| 798
| 0.804127
|
b6a225235ba6a7b3a8028bb80093588380fc8874
| 703
|
py
|
Python
|
aodag/wiki/wsgi.py
|
aodag/aodag-wiki
|
fe0b6b912a521ec0e4956bcd67b34f4c4c1ea2e5
|
[
"MIT"
] | null | null | null |
aodag/wiki/wsgi.py
|
aodag/aodag-wiki
|
fe0b6b912a521ec0e4956bcd67b34f4c4c1ea2e5
|
[
"MIT"
] | 1
|
2018-01-21T02:27:39.000Z
|
2018-01-21T02:27:39.000Z
|
aodag/wiki/wsgi.py
|
aodag/aodag-wiki
|
fe0b6b912a521ec0e4956bcd67b34f4c4c1ea2e5
|
[
"MIT"
] | null | null | null |
from pyramid.config import Configurator
def includeme(config):
config.add_route('top', '/')
config.add_route('wiki', '/wiki')
config.add_route('wikipage', '/wiki/{pagename}')
config.add_route('wikipage.edit', '/wiki/{pagename}/edit')
config.scan(".views")
def main(global_conf, **settings):
config = Configurator(
settings=settings,
root_factory=".resources.WikiResource",
)
config.include("pyramid_jinja2")
config.include("pyramid_tm")
config.include("pyramid_sqlalchemy")
config.include("pyramid_services")
config.include(".wsgi")
config.include(".wiki")
config.add_jinja2_renderer(".html")
return config.make_wsgi_app()
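# Illustrative local-run sketch (not part of the original module; the WSGI
# server choice and the sqlalchemy.url value are hypothetical):
#
#   from waitress import serve
#   app = main({}, **{"sqlalchemy.url": "sqlite:///wiki.sqlite"})
#   serve(app, host="127.0.0.1", port=6543)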
| 28.12
| 62
| 0.679943
|
c4d4da5a88183e2339c6f82227770c6c20891c3a
| 1,494
|
bzl
|
Python
|
web/java.bzl
|
uri-canva/rules_webtesting
|
8b700a3644a12ac95830f9067de8a301e6bc6531
|
[
"Apache-2.0"
] | 2
|
2021-08-05T14:15:58.000Z
|
2021-11-17T10:59:42.000Z
|
web/java.bzl
|
uri-canva/rules_webtesting
|
8b700a3644a12ac95830f9067de8a301e6bc6531
|
[
"Apache-2.0"
] | null | null | null |
web/java.bzl
|
uri-canva/rules_webtesting
|
8b700a3644a12ac95830f9067de8a301e6bc6531
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web Test rules for Java."""
load("//web/internal:wrap_web_test_suite.bzl", "wrap_web_test_suite")
def java_web_test_suite(name, java_test_tags = None, test_class = None, **kwargs):
"""Defines a test_suite of web_test targets that wrap a java_test target.
Args:
name: The base name of the test.
java_test_tags: A list of test tag strings to use for the java_test target.
test_class: Optional; default computed from name and blaze package.
**kwargs: Arguments for wrap_web_test_suite.
"""
if test_class == None:
test_package = native.package_name().replace("javatests/", "")
test_package = test_package.replace("/", ".")
test_class = test_package + "." + name
wrap_web_test_suite(
name = name,
rule = native.java_test,
test_class = test_class,
wrapped_test_tags = java_test_tags,
**kwargs
)
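# Illustrative BUILD usage sketch (the target name, sources, and browser labels
# are hypothetical; see wrap_web_test_suite for the full set of accepted kwargs):
#
#   load("//web:java.bzl", "java_web_test_suite")
#
#   java_web_test_suite(
#       name = "LoginPageTest",
#       srcs = ["LoginPageTest.java"],
#       browsers = ["//browsers:chromium-local"],
#       java_test_tags = ["no-sandbox"],
#   )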
| 38.307692
| 83
| 0.698126
|
23a2f19e12bc74006e7ba94ba34dfb40c7424f32
| 8,034
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20170301/get_virtual_network_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20170301/get_virtual_network_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20170301/get_virtual_network_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkPeeringResult',
'AwaitableGetVirtualNetworkPeeringResult',
'get_virtual_network_peering',
]
@pulumi.output_type
class GetVirtualNetworkPeeringResult:
"""
Peerings in a virtual network resource.
"""
def __init__(__self__, allow_forwarded_traffic=None, allow_gateway_transit=None, allow_virtual_network_access=None, etag=None, name=None, peering_state=None, provisioning_state=None, remote_virtual_network=None, use_remote_gateways=None):
if allow_forwarded_traffic and not isinstance(allow_forwarded_traffic, bool):
raise TypeError("Expected argument 'allow_forwarded_traffic' to be a bool")
pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic)
if allow_gateway_transit and not isinstance(allow_gateway_transit, bool):
raise TypeError("Expected argument 'allow_gateway_transit' to be a bool")
pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit)
if allow_virtual_network_access and not isinstance(allow_virtual_network_access, bool):
raise TypeError("Expected argument 'allow_virtual_network_access' to be a bool")
pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peering_state and not isinstance(peering_state, str):
raise TypeError("Expected argument 'peering_state' to be a str")
pulumi.set(__self__, "peering_state", peering_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if remote_virtual_network and not isinstance(remote_virtual_network, dict):
raise TypeError("Expected argument 'remote_virtual_network' to be a dict")
pulumi.set(__self__, "remote_virtual_network", remote_virtual_network)
if use_remote_gateways and not isinstance(use_remote_gateways, bool):
raise TypeError("Expected argument 'use_remote_gateways' to be a bool")
pulumi.set(__self__, "use_remote_gateways", use_remote_gateways)
@property
@pulumi.getter(name="allowForwardedTraffic")
def allow_forwarded_traffic(self) -> Optional[bool]:
"""
Whether the forwarded traffic from the VMs in the remote virtual network will be allowed/disallowed.
"""
return pulumi.get(self, "allow_forwarded_traffic")
@property
@pulumi.getter(name="allowGatewayTransit")
def allow_gateway_transit(self) -> Optional[bool]:
"""
If gateway links can be used in remote virtual networking to link to this virtual network.
"""
return pulumi.get(self, "allow_gateway_transit")
@property
@pulumi.getter(name="allowVirtualNetworkAccess")
def allow_virtual_network_access(self) -> Optional[bool]:
"""
Whether the VMs in the linked virtual network space would be able to access all the VMs in local Virtual network space.
"""
return pulumi.get(self, "allow_virtual_network_access")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringState")
def peering_state(self) -> Optional[str]:
"""
The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and 'Disconnected'.
"""
return pulumi.get(self, "peering_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="remoteVirtualNetwork")
def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of the remote virtual network.
"""
return pulumi.get(self, "remote_virtual_network")
@property
@pulumi.getter(name="useRemoteGateways")
def use_remote_gateways(self) -> Optional[bool]:
"""
If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
return pulumi.get(self, "use_remote_gateways")
class AwaitableGetVirtualNetworkPeeringResult(GetVirtualNetworkPeeringResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkPeeringResult(
allow_forwarded_traffic=self.allow_forwarded_traffic,
allow_gateway_transit=self.allow_gateway_transit,
allow_virtual_network_access=self.allow_virtual_network_access,
etag=self.etag,
name=self.name,
peering_state=self.peering_state,
provisioning_state=self.provisioning_state,
remote_virtual_network=self.remote_virtual_network,
use_remote_gateways=self.use_remote_gateways)
def get_virtual_network_peering(resource_group_name: Optional[str] = None,
virtual_network_name: Optional[str] = None,
virtual_network_peering_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkPeeringResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_name: The name of the virtual network.
:param str virtual_network_peering_name: The name of the virtual network peering.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkName'] = virtual_network_name
__args__['virtualNetworkPeeringName'] = virtual_network_peering_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20170301:getVirtualNetworkPeering', __args__, opts=opts, typ=GetVirtualNetworkPeeringResult).value
return AwaitableGetVirtualNetworkPeeringResult(
allow_forwarded_traffic=__ret__.allow_forwarded_traffic,
allow_gateway_transit=__ret__.allow_gateway_transit,
allow_virtual_network_access=__ret__.allow_virtual_network_access,
etag=__ret__.etag,
name=__ret__.name,
peering_state=__ret__.peering_state,
provisioning_state=__ret__.provisioning_state,
remote_virtual_network=__ret__.remote_virtual_network,
use_remote_gateways=__ret__.use_remote_gateways)
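# Illustrative usage sketch (the resource names are hypothetical):
#
#   peering = get_virtual_network_peering(
#       resource_group_name="my-rg",
#       virtual_network_name="my-vnet",
#       virtual_network_peering_name="my-vnet-to-hub")
#   pulumi.export("peeringState", peering.peering_state)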
| 46.439306
| 335
| 0.706995
|
bb31714139073a15b844e7882cc936b9e97c467c
| 4,700
|
py
|
Python
|
capsul/engine/module/axon.py
|
denisri/capsul
|
ea1b41f08ab1acc95e50d90916c1e282807874ca
|
[
"CECILL-B"
] | null | null | null |
capsul/engine/module/axon.py
|
denisri/capsul
|
ea1b41f08ab1acc95e50d90916c1e282807874ca
|
[
"CECILL-B"
] | null | null | null |
capsul/engine/module/axon.py
|
denisri/capsul
|
ea1b41f08ab1acc95e50d90916c1e282807874ca
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Configuration module which links with `Axon <http://brainvisa.info/axon/user_doc>`_
'''
from __future__ import absolute_import
import os
import six
import capsul.engine
import os.path as osp
def init_settings(capsul_engine):
with capsul_engine.settings as settings:
settings.ensure_module_fields('axon',
[dict(name='shared_directory',
type='string',
description=
'Directory where BrainVisa shared data is installed'),
dict(name='user_level',
type='int',
description=
'0: basic, 1: advanced, 2: expert, or more. '
'used to display or hide some advanced features or '
'process parameters that would be confusing to a novice '
'user'),
])
with capsul_engine.settings as session:
config = session.config('axon', 'global')
if not config:
values = {capsul_engine.settings.config_id_field: 'axon',
'shared_directory': None, 'user_level': 0}
session.new_config('axon', 'global', values)
# link with StudyConfig
if hasattr(capsul_engine, 'study_config'):
if 'BrainVISAConfig' not in capsul_engine.study_config.modules:
scmod = capsul_engine.study_config.load_module(
'BrainVISAConfig', {})
scmod.initialize_module()
scmod.initialize_callbacks()
else:
scmod = capsul_engine.study_config.modules['BrainVISAConfig']
scmod.sync_to_engine()
def check_configurations():
'''
Checks if the activated configuration is valid to use BrainVisa and returns
an error message if there is an error or None if everything is good.
'''
shared_dir = capsul.engine.configurations.get(
'axon', {}).get('shared_directory', '')
if not shared_dir:
return 'Axon shared_directory is not found'
return None
def complete_configurations():
'''
Try to automatically set or complete the capsul.engine.configurations for
Axon.
'''
config = capsul.engine.configurations
config = config.setdefault('axon', {})
shared_dir = config.get('shared_directory', None)
if shared_dir is None:
from soma import config as soma_config
shared_dir = soma_config.BRAINVISA_SHARE
if shared_dir:
config['shared_directory'] = shared_dir
def edition_widget(engine, environment):
''' Edition GUI for axon config - see
:class:`~capsul.qt_gui.widgets.settings_editor.SettingsEditor`
'''
from soma.qt_gui.controller_widget import ScrollControllerWidget
from soma.controller import Controller
import types
import traits.api as traits
def validate_config(widget):
controller = widget.controller_widget.controller
with widget.engine.settings as session:
conf = session.config('axon', widget.environment)
values = {'config_id': 'axon', 'user_level': controller.user_level}
if controller.shared_directory in (None, traits.Undefined, ''):
values['shared_directory'] = None
else:
values['shared_directory'] = controller.shared_directory
if conf is None:
session.new_config('axon', widget.environment, values)
else:
for k in ('shared_directory', 'user_level'):
setattr(conf, k, values[k])
controller = Controller()
controller.add_trait('shared_directory',
traits.Directory(desc='Directory where BrainVisa '
'shared data is installed'))
controller.add_trait(
'user_level',
traits.Int(desc=
'0: basic, 1: advanced, 2: expert, or more. '
'used to display or hide some advanced features or '
'process parameters that would be confusing to a novice '
'user'))
conf = engine.settings.select_configurations(
environment, {'axon': 'any'})
if conf:
controller.shared_directory = conf.get(
'capsul.engine.module.axon', {}).get('shared_directory',
traits.Undefined)
controller.user_level = conf.get(
'capsul.engine.module.axon', {}).get('user_level', 0)
widget = ScrollControllerWidget(controller, live=True)
widget.engine = engine
widget.environment = environment
widget.accept = types.MethodType(validate_config, widget)
return widget
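# Illustrative sketch of the check/complete helpers above (hypothetical driver
# code, not part of the module):
#
#   complete_configurations()
#   error = check_configurations()
#   if error:
#       print("Axon configuration problem:", error)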
| 37.301587
| 83
| 0.610426
|
ab05eef0c045dba96cfead7ace908046cc1351cb
| 16,391
|
py
|
Python
|
nvdb2osm.py
|
EHakansson/nvdb2osm
|
b3228b1c4032be813606314b7efd4cf975983073
|
[
"Unlicense"
] | null | null | null |
nvdb2osm.py
|
EHakansson/nvdb2osm
|
b3228b1c4032be813606314b7efd4cf975983073
|
[
"Unlicense"
] | null | null | null |
nvdb2osm.py
|
EHakansson/nvdb2osm
|
b3228b1c4032be813606314b7efd4cf975983073
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import hashlib
import logging
import pathlib
import zipfile
import glob
import os
import sys
import geopandas
from sortedcontainers import SortedDict
from process_and_resolve import *
from tag_translations import TAG_TRANSLATIONS, process_tag_translations
from nvdb_segment import NvdbSegment, NVDB_GEOMETRY_TAGS
from shapely_utils import shapely_linestring_to_way
from waydb import WayDatabase, print_progress
from osmxml import waydb2osmxml, write_osmxml
from nvdb_ti import time_interval_strings
_log = logging.getLogger("nvdb2osm")
# read_epsg_shapefile()
#
# Read the shapefile with the given name from a directory or zip archive
# into a GeoDataFrame, verifying SWEREF 99 (EPSG:3006) geometry.
#
def read_epsg_shapefile(directory_or_zip, name):
if zipfile.is_zipfile(directory_or_zip):
zf = zipfile.ZipFile(directory_or_zip)
files = [fn for fn in zf.namelist() if fn.endswith(name + ".shp")]
if len(files) > 0:
filename = files[0]
gdf_filename = "zip://" + str(directory_or_zip) + "!" + filename
else:
pattern = os.path.join(directory_or_zip, "*" + name + ".shp")
files = glob.glob(pattern)
if len(files) > 0:
filename = files[0]
gdf_filename = files[0]
if len(files) == 0:
_log.warning(f"No file name *{name}.shp in {directory_or_zip}")
return None
_log.info(f"Reading file {filename}")
gdf = geopandas.read_file(gdf_filename, encoding='cp1252')
_log.info(f"done ({len(gdf)} segments)")
assert gdf.crs == "epsg:3006", "Expected SWEREF 99 (epsg:3006) geometry"
return gdf
# read_nvdb_shapefile()
#
# Read a NVDB shapefile and apply tag translations.
#
def read_nvdb_shapefile(directory_or_zip, name, tag_translations, nvdb_total_bounds):
gdf = read_epsg_shapefile(directory_or_zip, name)
if gdf is None:
return []
_log.info(f"Parsing {len(gdf)} segments...")
# update global bounding box
bounds = gdf.total_bounds
if bounds[0] < nvdb_total_bounds[0]:
nvdb_total_bounds[0] = bounds[0]
if bounds[1] < nvdb_total_bounds[1]:
nvdb_total_bounds[1] = bounds[1]
if bounds[2] > nvdb_total_bounds[2]:
nvdb_total_bounds[2] = bounds[2]
if bounds[3] > nvdb_total_bounds[3]:
nvdb_total_bounds[3] = bounds[3]
ways = []
skip_count = 0
last_print = 0
for index, row in gdf.iterrows():
if len(gdf) > 50:
last_print = print_progress(last_print, index, len(gdf), progress_text=f"Parsing {len(gdf)} segments")
way = row.to_dict()
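        # drop the TILL_DATUM field (valid-until date); it is not used in the OSM output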
way.pop("TILL_DATUM", None)
restore_set = {}
for k in NVDB_GEOMETRY_TAGS:
if k in way:
restore_set[k] = way[k]
del way[k]
geometry = way["geometry"]
del way["geometry"]
if geometry is None:
_log.info(f"Skipping segment without geometry RLID {way['RLID']}")
skip_count += 1
continue
process_tag_translations(way, tag_translations)
if geometry.type == "Point":
points = Point(geometry.x, geometry.y)
else:
points = shapely_linestring_to_way(geometry)
if len(points) == 1:
_log.info(f"Skipping geometry (reduced) to one point {way}")
skip_count += 1
continue
way["geometry"] = points
way.update(restore_set)
nvdbseg = NvdbSegment(way)
nvdbseg.way_id = index
ways.append(nvdbseg)
if skip_count == 0:
_log.info("done")
else:
_log.info(f"done ({len(ways)} segments kept, {skip_count} skipped)")
return ways
# log_version()
#
# Log a unique hash for the code used
#
def log_version():
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
files = [ "geometry_basics.py", "merge_tags.py", "nvdb2osm.py", "nvdb_ti.py", "process_and_resolve.py", "shapely_utils.py", "twodimsearch.py",
"geometry_search.py", "nseg_tools.py", "nvdb_segment.py", "osmxml.py", "proj_xy.py", "tag_translations.py", "waydb.py"
]
_log.info("Checksum for each script file (to be replaced with single version number when script is stable):")
for fname in files:
path = os.path.join(os.path.dirname(__file__), fname)
_log.info(f" {fname:22} MD5: {md5(path)}")
# insert_rlid_elements()
#
# Wrapper to insert a read NVDB layer into the database, logging progress.
#
def insert_rlid_elements(way_db, ways, data_src_name, debug_ways=None, do_snap=True):
_log.info(f"Merging {len(ways)} segments...")
last_print = 0
for idx, way in enumerate(ways):
if len(ways) > 50:
last_print = print_progress(last_print, idx, len(ways), progress_text=f"Merging {len(ways)} segments")
if isinstance(way.way, list):
way_db.insert_rlid_way(way, data_src_name, debug_ways)
else:
did_snap = way_db.insert_rlid_node(way, data_src_name, do_snap)
if not did_snap and do_snap:
append_fixme_value(way.tags, "no nearby reference geometry to snap to")
_log.info("done merging")
def main():
"""The main function, entry point of the program."""
master_geometry_name = "NVDB_DKReflinjetillkomst"
    # Note: the order in which the layers are merged matters in places (see the comments
    # below), so be careful if you re-order them.
line_names = [
# We always do FunkVagklass and GCM_vagtyp/DKCykVgKatier first, as experience tells us
# that if there is a problem with the reference geometry these layers will trigger it.
"NVDB_DKFunkVagklass", # all streets/roads
"NVDB_DKGCM_vagtyp", # all footways/cycleways
"NVDB_DKCykVgKatier", # most often redundant, otherwise complements DKGCM_vagtyp
# just alphabetical order
"NVDB_DKAntal_korfalt2",
"NVDB_DKBarighet",
"NVDB_DKBegrAxelBoggiTryck",
"NVDB_DKBegrBruttovikt",
"NVDB_DKBegrFordBredd",
"NVDB_DKBegrFordLangd",
"NVDB_DKBro_och_tunnel",
"NVDB_DKCirkulationsplats",
"NVDB_DKFarjeled",
"NVDB_DKForbjudenFardriktning",
"NVDB_DKForbudTrafik",
"NVDB_DKGagata",
"NVDB_DKGangfartsomrade",
"NVDB_DKGatunamn",
"NVDB_DKGatutyp",
"NVDB_DKGCM_belyst",
"NVDB_DKGCM_separation",
"NVDB_DKHastighetsgrans",
"NVDB_DKHuvudled",
"NVDB_DKInskrTranspFarligtGods",
"NVDB_DKKollektivkorfalt",
# "NVDB_DKMiljozon", experimental tags, excluding them for now
"NVDB_DKMotortrafikled",
"NVDB_DKMotorvag",
"NVDB_DKOvrigt_vagnamn",
"NVDB_DKRekomVagFarligtGods",
"NVDB_DKSlitlager",
"NVDB_DKTillganglighet",
"NVDB_DKVagbredd",
"NVDB_DKVagnummer",
"TRV_EVB_DKDriftbidrag_statligt",
"VIS_DKFunktionellt_priovagnat",
"VIS_DKOmkorningsforbud",
"VIS_DKSlitlager"
]
point_names = [
"NVDB_DKFarthinder",
"NVDB_DKGCM_passage",
"NVDB_DKHojdhinder45dm",
"NVDB_DKKorsning",
"NVDB_DKStopplikt",
"NVDB_DKVaghinder",
"NVDB_DKVajningsplikt",
"VIS_DKJarnvagskorsning",
"VIS_DKP_ficka",
"VIS_DKRastplats",
]
parser = argparse.ArgumentParser(description='Convert NVDB-data from Trafikverket to OpenStreetMap XML')
parser.add_argument('--dump_layers', help="Write an OSM XML file for each layer", action='store_true')
parser.add_argument('--skip_railway', help="Don't require railway geometry (leads to worse railway crossing handling)", action='store_true')
parser.add_argument('--railway_file', type=pathlib.Path, help="Path to zip or dir with national railway network *.shp (usually Järnvägsnät_grundegenskaper.zip)")
parser.add_argument('--rlid', help="Include RLID in output", action='store_true')
parser.add_argument('--small_road_resolve', help="Specify small road resolve algorithm", default="default")
parser.add_argument('--skip_self_test', help="Skip self tests", action='store_true')
parser.add_argument(
'-d', '--debug',
help="Print debugging statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-v', '--verbose',
help="Be verbose",
action="store_const", dest="loglevel", const=logging.INFO,
)
parser.add_argument('shape_file', type=pathlib.Path,
help="zip or dir with NVDB *.shp files")
parser.add_argument('osm_file', type=pathlib.Path,
help="filename of OSM XML output")
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=args.loglevel)
_log = logging.getLogger("nvdb2osm")
# When we turn on logging fiona gets a bit talkative
logging.getLogger("fiona.env").setLevel(logging.WARNING)
logging.getLogger("fiona._env").setLevel(logging.WARNING)
logging.getLogger("fiona._shim").setLevel(logging.WARNING)
logging.getLogger("fiona.ogrext").setLevel(logging.WARNING)
logging.getLogger("fiona.collection").setLevel(logging.WARNING)
_log.info(f"args are {args}")
log_version()
write_rlid = args.rlid
debug_dump_layers = args.dump_layers
skip_railway = args.skip_railway
directory_or_zip = args.shape_file
output_filename = args.osm_file
railway_filename = args.railway_file
perform_self_testing = not args.skip_self_test
small_road_resolve_algorithm = args.small_road_resolve
if small_road_resolve_algorithm not in SMALL_ROAD_RESOLVE_ALGORITHMS:
_log.error(f"small_road_resolve parameter must be one of {SMALL_ROAD_RESOLVE_ALGORITHMS}")
sys.exit(1)
if railway_filename is None and not skip_railway:
_log.error("File with national railway geometry not provided (use --railway_file). Can be skipped by adding --skip_railway parameter, but then railway crossings will be somewhat misaligned")
sys.exit(1)
nvdb_total_bounds = [10000000, 10000000, 0, 0] # init to outside max range of SWEREF99
# First setup a complete master geometry and refine it so we have a good geometry to merge the rest of the data with
name = master_geometry_name
ref_ways = read_nvdb_shapefile(directory_or_zip, name, TAG_TRANSLATIONS[name], nvdb_total_bounds)
if debug_dump_layers:
write_osmxml(ref_ways, [], "raw_reference_geometry.osm")
ref_ways = find_overlapping_and_remove_duplicates(name, ref_ways)
way_db = WayDatabase(ref_ways, perform_self_testing)
if debug_dump_layers:
write_osmxml(way_db.get_reference_geometry(), [], "reference_geometry.osm")
all_line_names = line_names
layer_count = len(all_line_names)
layer_idx = 0
for name in all_line_names:
if name is None:
break
ways = read_nvdb_shapefile(directory_or_zip, name, TAG_TRANSLATIONS[name], nvdb_total_bounds)
ways = find_overlapping_and_remove_duplicates(name, ways)
did_insert_new_ref_geometry = way_db.insert_missing_reference_geometry_if_any(ways)
if debug_dump_layers:
if did_insert_new_ref_geometry:
write_osmxml(way_db.get_reference_geometry(), [], "reference_geometry.osm")
write_osmxml(ways, [], name + ".osm")
debug_ways = None
if name == "NVDB_DKBro_och_tunnel":
ways = preprocess_bridges_and_tunnels(ways, way_db)
if debug_dump_layers:
write_osmxml(ways, [], name + "-preproc.osm")
debug_ways = []
insert_rlid_elements(way_db, ways, name, debug_ways=debug_ways)
if perform_self_testing:
way_db.test_segments()
if debug_ways is not None:
write_osmxml(debug_ways, [], name + "-adapted.osm")
layer_idx += 1
_log.info(f"Merged {layer_idx} of {layer_count} line geometry layers")
way_db.join_segments_with_same_tags()
way_db.remove_short_sub_segments()
way_db.setup_geometry_search()
layer_count = len(point_names)
layer_idx = 0
for name in point_names:
if name is None:
break
points = read_nvdb_shapefile(directory_or_zip, name, TAG_TRANSLATIONS[name], nvdb_total_bounds)
points = find_overlapping_and_remove_duplicates(name, points)
do_snap = True
if name == "NVDB_DKGCM_passage":
points = preprocess_footcycleway_crossings(points, way_db)
elif name == "NVDB_DKKorsning":
points = process_street_crossings(points, way_db, name)
elif name == "VIS_DKJarnvagskorsning":
if len(points) > 0:
railways = []
if not skip_railway:
_log.info(f"There are {len(points)} railway crossings, reading railway geometry to have something to snap them to")
gdf = read_epsg_shapefile(railway_filename, "Järnvägsnät_med_grundegenskaper")
if gdf is None:
raise RuntimeError("Railway geometry missing")
_log.info(f"Filtering out railway segments for bounding box {nvdb_total_bounds}...")
for index, row in gdf.iterrows():
if bounds_intersect(row.geometry.bounds, nvdb_total_bounds):
seg = NvdbSegment({ "geometry": shapely_linestring_to_way(row.geometry),
"RLID": "RW-%s" % index
})
railways.append(seg)
_log.info(f"Done ({len(railways)} of {len(gdf)} segments kept)")
if debug_dump_layers:
write_osmxml(railways, [], "local-railway.osm")
points = preprocess_railway_crossings(points, way_db, railways)
elif name == "VIS_DKP_ficka":
points = preprocess_laybys(points, way_db)
do_snap = False
insert_rlid_elements(way_db, points, name, do_snap=do_snap)
layer_idx += 1
_log.debug(f"Merged {layer_idx} of {layer_count} point layers")
if debug_dump_layers:
waydb2osmxml(way_db, "pre-resolve.osm")
sort_multiple_road_names(way_db)
resolve_highways(way_db, small_road_resolve_algorithm)
if small_road_resolve_algorithm not in ['prefer_service_static', 'prefer_track_static']:
upgrade_unclassified_stumps_connected_to_residential(way_db)
guess_upgrade_tracks(way_db)
    # convert cycleway way crossings to node crossings; this is optional, as both mapping styles are correct
simplify_cycleway_crossings(way_db)
simplify_speed_limits(way_db)
remove_redundant_speed_limits(way_db)
cleanup_highway_widths(way_db)
remove_redundant_cycleway_names(way_db)
    # Note: simplify_oneway() may reverse some ways, which breaks functions that depend on
    # ways with the same RLID being oriented in the same direction.
simplify_oneway(way_db, way_db.point_db)
resolve_lanes(way_db)
final_pass_postprocess_miscellaneous_tags(way_db)
used_keys = SortedDict()
cleanup_used_nvdb_tags(way_db.way_db, used_keys)
cleanup_used_nvdb_tags(way_db.point_db, used_keys)
log_used_and_leftover_keys(used_keys)
_log.info("Time intervals used:")
for str1 in time_interval_strings:
_log.info(f" '{str1}'")
if debug_dump_layers:
waydb2osmxml(way_db, "pre-join.osm")
way_db.join_segments_with_same_tags(join_rlid=True)
if debug_dump_layers:
waydb2osmxml(way_db, "pre-treelike.osm")
way_db.make_way_directions_tree_like()
if debug_dump_layers:
waydb2osmxml(way_db, "pre-simplify.osm")
way_db.simplify_geometry()
_log.info(f"Writing output to {output_filename}")
waydb2osmxml(way_db, output_filename, write_rlid=write_rlid)
_log.info("done writing output")
_log.info("Conversion is complete. Don't expect NVDB data to be perfect or complete.")
_log.info("Remember to validate the OSM file (JOSM validator) and check any fixme tags.")
_log.info("Have fun and merge responsibly!")
if __name__ == "__main__":
main()
| 39.496386
| 198
| 0.663901
|
4a4a58d635c90acd5c88f7a023dcae04a9200dcb
| 7,720
|
py
|
Python
|
hi-ml-histopathology/src/histopathology/configs/classification/DeepSMILEPanda.py
|
kumar-pratik/hi-ml
|
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
|
[
"MIT"
] | null | null | null |
hi-ml-histopathology/src/histopathology/configs/classification/DeepSMILEPanda.py
|
kumar-pratik/hi-ml
|
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
|
[
"MIT"
] | null | null | null |
hi-ml-histopathology/src/histopathology/configs/classification/DeepSMILEPanda.py
|
kumar-pratik/hi-ml
|
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from typing import Any, List
from pathlib import Path
import os
from monai.transforms import Compose
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks import Callback
from health_azure.utils import CheckpointDownloader
from health_azure.utils import get_workspace, is_running_in_azure_ml
from health_ml.networks.layers.attention_layers import AttentionLayer
from health_ml.utils import fixed_paths
from histopathology.datamodules.base_module import CacheMode, CacheLocation
from histopathology.datamodules.panda_module import PandaTilesDataModule
from histopathology.datasets.panda_tiles_dataset import PandaTilesDataset
from health_ml.utils.checkpoint_utils import get_best_checkpoint_path
from histopathology.models.encoders import (
HistoSSLEncoder,
ImageNetEncoder,
ImageNetSimCLREncoder,
SSLEncoder,
)
from histopathology.models.transforms import (
EncodeTilesBatchd,
LoadTilesBatchd,
)
from histopathology.configs.classification.BaseMIL import BaseMIL
from histopathology.datasets.panda_dataset import PandaDataset
class DeepSMILEPanda(BaseMIL):
"""`is_finetune` sets the fine-tuning mode. If this is set, setting cache_mode=CacheMode.NONE takes ~30 min/epoch and
cache_mode=CacheMode.MEMORY, precache_location=CacheLocation.CPU takes ~[5-10] min/epoch.
Fine-tuning with caching completes using batch_size=4, max_bag_size=1000, num_epochs=20, max_num_gpus=1 on PANDA.
"""
def __init__(self, **kwargs: Any) -> None:
default_kwargs = dict(
# declared in BaseMIL:
pool_type=AttentionLayer.__name__,
num_transformer_pool_layers=4,
num_transformer_pool_heads=4,
# average number of tiles is 56 for PANDA
encoding_chunk_size=60,
cache_mode=CacheMode.MEMORY,
precache_location=CacheLocation.CPU,
is_finetune=False,
# declared in DatasetParams:
local_datasets=[Path("/tmp/datasets/PANDA_tiles"), Path("/tmp/datasets/PANDA")],
azure_datasets=["PANDA_tiles", "PANDA"],
# declared in TrainerParams:
max_epochs=200,
# use_mixed_precision = True,
# declared in WorkflowParams:
crossval_count=5,
# declared in OptimizerParams:
l_rate=5e-4,
weight_decay=1e-4,
adam_betas=(0.9, 0.99))
default_kwargs.update(kwargs)
super().__init__(**default_kwargs)
self.class_names = ["ISUP 0", "ISUP 1", "ISUP 2", "ISUP 3", "ISUP 4", "ISUP 5"]
if not is_running_in_azure_ml():
self.num_epochs = 1
self.best_checkpoint_filename = "checkpoint_max_val_auroc"
self.best_checkpoint_filename_with_suffix = (
self.best_checkpoint_filename + ".ckpt"
)
self.checkpoint_folder_path = "outputs/checkpoints/"
best_checkpoint_callback = ModelCheckpoint(
dirpath=self.checkpoint_folder_path,
monitor="val/accuracy",
filename=self.best_checkpoint_filename,
auto_insert_metric_name=False,
mode="max",
)
self.callbacks = best_checkpoint_callback
@property
def cache_dir(self) -> Path:
return Path(
f"/tmp/innereye_cache1/{self.__class__.__name__}-{self.encoder_type}/"
)
def setup(self) -> None:
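        # The SSL encoder needs its pretrained checkpoint downloaded from the AzureML
        # workspace before get_encoder() is called; the encoder is put in eval mode
        # unless fine-tuning.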
if self.encoder_type == SSLEncoder.__name__:
from histopathology.configs.run_ids import innereye_ssl_checkpoint_binary
self.downloader = CheckpointDownloader(
aml_workspace=get_workspace(),
run_id=innereye_ssl_checkpoint_binary, # innereye_ssl_checkpoint
checkpoint_filename="best_checkpoint.ckpt", # "last.ckpt",
download_dir="outputs/",
remote_checkpoint_dir=Path("outputs/checkpoints")
)
os.chdir(fixed_paths.repository_root_directory().parent)
self.downloader.download_checkpoint_if_necessary()
self.encoder = self.get_encoder()
if not self.is_finetune:
self.encoder.eval()
def get_data_module(self) -> PandaTilesDataModule:
image_key = PandaTilesDataset.IMAGE_COLUMN
if self.is_finetune:
transform = Compose([LoadTilesBatchd(image_key, progress=True)])
else:
transform = Compose([
LoadTilesBatchd(image_key, progress=True),
EncodeTilesBatchd(image_key, self.encoder, chunk_size=self.encoding_chunk_size)
])
return PandaTilesDataModule(
root_path=self.local_datasets[0],
max_bag_size=self.max_bag_size,
batch_size=self.batch_size,
max_bag_size_inf=self.max_bag_size_inf,
transform=transform,
cache_mode=self.cache_mode,
precache_location=self.precache_location,
cache_dir=self.cache_dir,
crossval_count=self.crossval_count,
crossval_index=self.crossval_index,
)
def get_slides_dataset(self) -> PandaDataset:
return PandaDataset(root=self.local_datasets[1]) # type: ignore
def get_callbacks(self) -> List[Callback]:
return super().get_callbacks() + [self.callbacks]
def get_path_to_best_checkpoint(self) -> Path:
"""
Returns the full path to a checkpoint file that was found to be best during training, whatever criterion
was applied there. This is necessary since for some models the checkpoint is in a subfolder of the checkpoint
folder.
"""
# absolute path is required for registering the model.
absolute_checkpoint_path = Path(fixed_paths.repository_root_directory(),
self.checkpoint_folder_path,
self.best_checkpoint_filename_with_suffix)
if absolute_checkpoint_path.is_file():
return absolute_checkpoint_path
absolute_checkpoint_path_parent = Path(fixed_paths.repository_root_directory().parent,
self.checkpoint_folder_path,
self.best_checkpoint_filename_with_suffix)
if absolute_checkpoint_path_parent.is_file():
return absolute_checkpoint_path_parent
checkpoint_path = get_best_checkpoint_path(Path(self.checkpoint_folder_path))
if checkpoint_path.is_file():
return checkpoint_path
raise ValueError("Path to best checkpoint not found")
class PandaImageNetMIL(DeepSMILEPanda):
def __init__(self, **kwargs: Any) -> None:
super().__init__(encoder_type=ImageNetEncoder.__name__, **kwargs)
class PandaImageNetSimCLRMIL(DeepSMILEPanda):
def __init__(self, **kwargs: Any) -> None:
super().__init__(encoder_type=ImageNetSimCLREncoder.__name__, **kwargs)
class PandaSSLMIL(DeepSMILEPanda):
def __init__(self, **kwargs: Any) -> None:
super().__init__(encoder_type=SSLEncoder.__name__, **kwargs)
class PandaHistoSSLMIL(DeepSMILEPanda):
def __init__(self, **kwargs: Any) -> None:
super().__init__(encoder_type=HistoSSLEncoder.__name__, **kwargs)
| 43.128492
| 121
| 0.655311
|
756240407e9f00b4641ed492b85c60f94ee98002
| 769
|
py
|
Python
|
example/class8.py
|
sano-jin/go-in-ocaml
|
b5e5fca33e194776477a0db389f6e52bdc0a66fe
|
[
"MIT"
] | 1
|
2021-09-24T10:25:40.000Z
|
2021-09-24T10:25:40.000Z
|
example/class8.py
|
sano-jin/go-in-ocaml
|
b5e5fca33e194776477a0db389f6e52bdc0a66fe
|
[
"MIT"
] | null | null | null |
example/class8.py
|
sano-jin/go-in-ocaml
|
b5e5fca33e194776477a0db389f6e52bdc0a66fe
|
[
"MIT"
] | null | null | null |
class MyClass():
name = 'Hoge'
def __init__(self, hige):
self.hige = hige
class Greeting(object):
message = 'Hi! I am MyClass2'
def __init__(self, location):
print('hoge!!')
print(self.__class__.__mro__)
self.location = location
def greet(self):
print(self.message)
print('I am from', self.location)
class MySubClass(Greeting, MyClass):
hige = 'Hige'
def __init__(self, fuga, piyo):
print('Hige!!')
print(Greeting)
print(Greeting.__init__)
print('Hige!!')
Greeting.__init__(self, fuga)
self.piyo = piyo
obj = MySubClass('Tokyo', 2)
print(obj.name)
print(obj.hige)
print(obj.piyo)
print(obj.location)
obj.greet()
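# NOTE: 'hogera' is never defined on MySubClass or its bases, so the next line raises AttributeError.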
print(obj.hogera)
| 20.783784
| 41
| 0.59948
|
8417d3d3b4a44faeb9c4b0b8bcd87e150d6cb8bb
| 28,282
|
py
|
Python
|
mixer.py
|
jggatc/pyj2d
|
a2b47e4d6177e3b6a4dc4b74a0594d6959f2a7dc
|
[
"MIT"
] | 11
|
2015-05-30T13:33:31.000Z
|
2022-01-07T02:38:22.000Z
|
mixer.py
|
jggatc/pyj2d
|
a2b47e4d6177e3b6a4dc4b74a0594d6959f2a7dc
|
[
"MIT"
] | null | null | null |
mixer.py
|
jggatc/pyj2d
|
a2b47e4d6177e3b6a4dc4b74a0594d6959f2a7dc
|
[
"MIT"
] | 1
|
2018-08-05T07:58:13.000Z
|
2018-08-05T07:58:13.000Z
|
#PyJ2D - Copyright (C) 2011 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from javax.sound.sampled import AudioSystem, AudioFormat
from javax.sound.sampled import LineUnavailableException
from java.io import ByteArrayInputStream
from java.io import File, IOException
from java.lang import Thread, Runnable
from java.lang import InterruptedException, IllegalArgumentException
from java.util.concurrent import ConcurrentLinkedDeque
from java.util import NoSuchElementException
from java.util.concurrent.atomic import AtomicBoolean
import jarray
from pyj2d import env
from pyj2d import locals as Const
try:
from pyj2d import Mixer as AudioMixer
except ImportError:
AudioMixer = None
__docformat__ = 'restructuredtext'
class Mixer(Runnable):
"""
**pyj2d.mixer**
* pyj2d.mixer.init
* pyj2d.mixer.quit
* pyj2d.mixer.get_init
* pyj2d.mixer.stop
* pyj2d.mixer.pause
* pyj2d.mixer.unpause
* pyj2d.mixer.fadeout
* pyj2d.mixer.set_num_channels
* pyj2d.mixer.get_num_channels
* pyj2d.mixer.set_reserved
* pyj2d.mixer.find_channel
* pyj2d.mixer.get_busy
* pyj2d.mixer.Sound
* pyj2d.mixer.Channel
* pyj2d.mixer.music
"""
def __init__(self):
self._mixer = None
Sound._mixer = self
Channel._mixer = self
self.Sound = Sound
self.Channel = self._get_channel
self.music = None
self._channel_max = 8
self._channels = {}
self._channel_available = ConcurrentLinkedDeque()
self._channel_available.addAll(list(range(self._channel_max)))
self._channel_active = ConcurrentLinkedDeque()
self._channel_reserved = ConcurrentLinkedDeque()
self._channel_reserved_num = 0
self._thread = None
self.run = self._process
self._active = AtomicBoolean(False)
self._initialized = False
def init(self, frequency=22050, size=-16, channels=2, buffer=4096):
"""
Mixer initialization.
Argument sampled frequency, bit size, channels, and buffer.
Currently implements PCM 16-bit audio.
Plays WAV, AIFF, and AU sampled audio.
To specify BigEndian format of AIFF and AU, use size of float type.
The mixing is done by Mixer.class, compiled with 'javac Mixer.java'.
For JAR creation include with 'jar uvf App.jar pyj2d/Mixer.class'.
"""
if not self._initialized:
encoding = {True:AudioFormat.Encoding.PCM_SIGNED, False:AudioFormat.Encoding.PCM_UNSIGNED}[size<0]
channels = {True:1, False:2}[channels<=1]
framesize = int((abs(size)/8) * channels)
isBigEndian = isinstance(size, float)
self._audio_format = AudioFormat(encoding, int(frequency), int(abs(size)), channels, framesize, int(frequency), isBigEndian)
self._bufferSize = buffer
try:
self._mixer = AudioMixer(self._audio_format, self._bufferSize)
except TypeError:
self._mixer = None
return None
if not self._mixer.isInitialized():
return None
self._byteRate = self._audio_format.getSampleRate() * self._audio_format.getChannels() * (self._audio_format.getSampleSizeInBits()/8)
self._bufferSize = self._mixer.getBufferSize()
self._byteArray = jarray.zeros(self._bufferSize, 'b')
for id in range(self._channel_max):
self._get_channel(id)
self.music = Music()
self._initialized = True
self._thread = Thread(self)
self._thread.start()
return None
def pre_init(self, frequency=22050, size=-16, channels=2, buffer=4096):
"""
Mixer initialization.
"""
self.init(frequency, size, channels, buffer)
return None
def quit(self):
"""
Stop mixer processing and release resources.
"""
self._initialized = False
return None
def _quit(self):
self.stop()
self.music._channel.stop()
try:
self._mixer.quit()
except AttributeError:
pass
self._mixer = None
def get_init(self):
"""
Get the audio format initialized.
"""
if self._initialized:
frequency = int(self._audio_format.sampleRate)
format = self._audio_format.sampleSizeInBits * {True:1,False:-1}[self._audio_format.bigEndian]
channels = self._audio_format.channels
return (frequency, format, channels)
else:
return None
def stop(self):
"""
Stop mixer channels.
"""
for id in self._channel_active.iterator():
if id > -1:
self._channels[id].stop()
return None
def fadeout(self, time):
"""
Fadeout mixer channels in given time.
"""
for id in self._channel_active.iterator():
if id > -1:
self._channels[id].fadeout(time)
return None
def pause(self):
"""
Pause mixer channels.
"""
for id in self._channel_active.iterator():
if id > -1:
self._channels[id].pause()
return None
def unpause(self):
"""
Unpause mixer channels.
"""
for id in self._channel_active.iterator():
if id > -1:
self._channels[id].unpause()
return None
def set_num_channels(self, count):
"""
Set maximum mixer channels.
Argument channel count.
"""
if count >= self._channel_max:
for id in range(self._channel_max, count):
self._get_channel(id)
self._channel_available.add(id)
self._channel_max = count
elif count >= 0:
for id in range(count, self._channel_max):
if id in self._channels:
if self._channels[id] is not None:
self._channels[id].stop()
del self._channels[id]
self._channel_available.remove(id)
self._channel_max = count
return None
def get_num_channels(self):
"""
Get maximum mixer channels.
"""
return self._channel_max
def set_reserved(self, count):
"""
Reserve channel.
Argument reserved channel count.
"""
if count > self._channel_max:
count = self._channel_max
elif count < 0:
count = 0
self._channel_reserved_num = count
self._channel_reserved.clear()
for id in range(self._channel_reserved_num):
self._channel_reserved.add(id)
self._channel_available.remove(id)
return None
def find_channel(self, force=False):
"""
Get an inactive mixer channel.
        Optional force argument returns the longest-running channel if all are active.
"""
try:
id = self._channel_available.pop()
self._channel_available.add(id)
return self._channels[id]
except NoSuchElementException:
pass
try:
if self._channel_reserved_num:
id = self._channel_reserved.pop()
self._channel_reserved.add(id)
return self._channels[id]
except NoSuchElementException:
pass
if not force:
return None
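        # force: pick the longest-running unreserved active channel, falling back to the
        # longest-running reserved one, and finally to channel 0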
longest = None
longest_reserved = None
for id in self._channel_active.iterator():
if id > self._channel_reserved_num-1:
longest = id
break
elif id > -1:
if longest_reserved is None:
longest_reserved = id
if longest is not None:
channel = longest
else:
if longest_reserved is not None:
channel = longest_reserved
else:
channel = 0
return self._channels[channel]
def get_busy(self):
"""
Check if mixer channels are actively processing.
"""
for id in self._channel_active.iterator():
if id > -1:
if self._channels[id]._active:
return True
return False
def _process(self):
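        # Mixer thread loop: while initialized, mix (or read) the active channels' audio
        # data and write it to the output line; idle when no channel is active.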
while self._initialized:
if not self._active.get():
self._idle()
continue
if self._channel_active.size() > 1:
data, data_len = self._mix(self._channel_active)
if data_len > 0:
self._write(data, data_len)
else:
try:
channel = self._channel_active.getFirst()
data, data_len = self._read(channel)
except NoSuchElementException:
data_len = 0
if data_len > 0:
self._write(data, data_len)
self._quit()
def _idle(self):
try:
self._thread.sleep(10)
except InterruptedException:
Thread.currentThread().interrupt()
self.quit()
def _mix(self, channels):
for id in channels.iterator():
channel = self._channels[id]
if not channel._active.get():
continue
try:
data, data_len, lvol, rvol = channel._get()
except AttributeError:
continue
self._mixer.setAudioData(data, data_len, lvol, rvol)
data_len = self._mixer.getAudioData(self._byteArray)
return self._byteArray, data_len
def _read(self, channel):
channel = self._channels[channel]
if not channel._active.get():
data, data_len = None, 0
else:
try:
data, data_len, lvol, rvol = channel._get()
except AttributeError:
data, data_len = None, 0
if data_len:
if lvol < 1.0 or rvol < 1.0:
data = self._mixer.processVolume(data, data_len, lvol, rvol)
return data, data_len
def _write(self, data, data_len):
try:
self._mixer.write(data, 0, data_len)
except IllegalArgumentException:
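            # the write length must be a whole number of frames; trim any trailing partial frame and retry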
nonIntegralByte = data_len % self._audio_format.getFrameSize()
if nonIntegralByte:
data_len -= nonIntegralByte
try:
self._mixer.write(data, 0, data_len)
except (IllegalArgumentException, LineUnavailableException):
pass
except LineUnavailableException:
pass
def _activate_channel(self, id):
if id > self._channel_reserved_num-1:
self._channel_available.remove(id)
else:
self._channel_reserved.remove(id)
self._channel_active.add(id)
self._active.set(True)
def _deactivate_channel(self, id):
self._channel_active.remove(id)
if self._channel_active.isEmpty():
self._active.set(False)
def _restore_channel(self, id):
if id > self._channel_reserved_num-1:
self._channel_available.add(id)
elif id > -1:
self._channel_reserved.add(id)
def _retrieve_channel(self):
try:
id = self._channel_available.pop()
self._channel_active.add(id)
self._active.set(True)
return self._channels[id]
except NoSuchElementException:
return None
def _get_channel(self, id):
if id in self._channels:
return self._channels[id]
else:
return Channel(id)
def _register_channel(self, channel):
id = channel._id
if id < self._channel_max:
self._channels[id] = channel
else:
raise AttributeError("Channel not available.")
class Sound(object):
"""
**pyj2d.mixer.Sound**
* Sound.play
* Sound.stop
* Sound.fadeout
* Sound.set_volume
* Sound.get_volume
* Sound.get_num_channels
* Sound.get_length
* Sound.get_raw
"""
_id = 0
_mixer = None
def __init__(self, sound_file):
self._id = Sound._id
Sound._id += 1
if isinstance(sound_file, str):
_sound_file = self._get_sound_file(sound_file)
self._sound_object = self._get_sound_object(_sound_file)
else:
self._sound_object = sound_file
self._channel = None
self._volume = 1.0
def _get_sound_file(self, sound_file):
try:
_sound_file = env.japplet.getClass().getResource(sound_file.replace('\\','/'))
if not _sound_file:
raise IOError
except:
_sound_file = File(sound_file)
return _sound_file
def _get_sound_object(self, sound_file):
stream = AudioSystem.getAudioInputStream(sound_file)
sound_object = jarray.zeros(stream.available(), 'b')
stream.read(sound_object)
stream.close()
return sound_object
def _get_stream(self):
return ByteArrayInputStream(self._sound_object)
def play(self, loops=0, maxtime=0, fade_ms=0):
"""
Play sound on mixer channel.
Argument loops is repeat number or -1 for continuous,
maxtime is maximum play time, and fade_ms is fade-in time.
"""
self._channel = self._mixer._retrieve_channel()
try:
self._channel._play(self, loops, maxtime, fade_ms)
except AttributeError:
pass
return self._channel
def stop(self):
"""
Stop sound on active channels.
"""
channels = self._mixer._channels
for id in self._mixer._channel_active.iterator():
if id > -1:
try:
if channels[id]._sound._id == self._id:
channels[id].stop()
except AttributeError:
continue
return None
def fadeout(self, time):
"""
Fadeout sound on active channels in given time.
"""
channels = self._mixer._channels
for id in self._mixer._channel_active.iterator():
if id > -1:
try:
if channels[id]._sound._id == self._id:
channels[id].fadeout(time)
except AttributeError:
continue
return None
def set_volume(self, volume):
"""
Set sound volume.
Argument volume of value 0.0 to 1.0.
"""
if volume < 0.0:
volume = 0.0
elif volume > 1.0:
volume = 1.0
self._volume = volume
return None
def get_volume(self):
"""
Get sound volume.
"""
return self._volume
def get_num_channels(self):
"""
        Get the number of channels on which the sound is active.
"""
channels = self._mixer._channels
channel = 0
for id in self._mixer._channel_active.iterator():
if id > -1:
try:
if channels[id]._sound._id == self._id:
channel += 1
except AttributeError:
continue
return channel
def get_length(self):
"""
Get length of sound sample.
"""
return len(self._sound_object) / self._mixer._byteRate
def get_raw(self):
data = jarray.zeros(len(self._sound_object), 'b')
for i in range(len(self._sound_object)):
data[i] = self._sound_object[i]
return data
class _SoundStream(Sound):
def _get_sound_object(self, sound_file):
return sound_file
def _get_stream(self):
if isinstance(self._sound_object, File):
return AudioSystem.getAudioInputStream(self._sound_object)
else:
return ByteArrayInputStream(self._sound_object)
def get_length(self):
if not isinstance(self._sound_object, File):
return Sound.get_length(self)
else:
stream = self._get_stream()
length = stream.getFrameLength() / stream.getFormat().getFrameRate()
stream.close()
return length
def get_raw(self):
if not isinstance(self._sound_object, File):
return Sound.get_raw(self)
else:
return Sound._get_sound_object(self, self._sound_object)
class Channel(object):
"""
**pyj2d.mixer.Channel**
* Channel.play
* Channel.stop
* Channel.pause
* Channel.unpause
* Channel.fadeout
* Channel.set_volume
* Channel.get_volume
* Channel.get_busy
* Channel.get_sound
* Channel.queue
* Channel.get_queue
* Channel.set_endevent
* Channel.get_endevent
"""
_mixer = None
def __init__(self, id):
self._id = id
self._sound = None
self._stream = None
self._len = self._mixer._bufferSize
self._data = jarray.zeros(self._len, 'b')
self._data_len = 0
self._data_sum = 0
self._data_rate = self._mixer._byteRate / 1000
self._active = AtomicBoolean(False)
self._pause = False
self._loops = 0
self._volume = 1.0
self._lvolume = 1.0
self._rvolume = 1.0
self._queue = None
self._endevent = None
self._maxtime = 0
self._fadein = 0
self._fadeout = 0
self._dvol = 1.0
self._process = False
self._mixer._register_channel(self)
def _set_sound(self, sound):
self._sound = sound
self._stream = sound._get_stream()
def _reset_sound(self):
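        # Rewind the current sound by reopening its stream; playback resumes unless the channel is paused.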
self._active.set(False)
restart = not self._pause
if not self._sound:
return
try:
sound = self._sound
self._stream.close()
self._set_sound(self._sound)
except AttributeError:
restart = False
if restart:
self._active.set(True)
def _get(self):
try:
self._data_len = self._stream.read(self._data, 0, self._len)
except IOException:
self._data_len = 0
if self._data_len > 0:
self._data_sum += self._data_len
if not self._process:
return (self._data, self._data_len, self._lvolume*self._sound._volume, self._rvolume*self._sound._volume)
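            # apply maxtime / fade-in / fade-out processing: trim the data length if needed
            # and compute a volume scaling factor (_dvol) for this chunk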
if self._maxtime:
self._dvol = 1.0
if self._data_sum > self._maxtime:
self._data_len -= (self._data_sum-self._maxtime)
self._maxtime = 0
self._loops = 0
self._onended()
if self._fadein:
if self._data_sum < self._fadein:
self._dvol = self._data_sum / self._fadein
else:
self._dvol = 1.0
self._fadein = 0
if not (self._maxtime or self._fadeout):
self._process = False
elif self._fadeout:
if self._data_sum < self._fadeout:
self._dvol = 1.0 - (self._data_sum / self._fadeout)
else:
self._dvol = 0.01
self._fadeout = 0
self._loops = 0
self._onended()
return (self._data, self._data_len, self._lvolume*self._sound._volume*self._dvol, self._rvolume*self._sound._volume*self._dvol)
else:
self._data_sum = 0
self._onended()
return (self._data, self._data_len, 1.0, 1.0)
def _play(self, sound, loops, maxtime, fade_ms):
self._set_sound(sound)
self._loops = loops
if maxtime:
self._maxtime = int(maxtime * self._data_rate)
self._process = True
if fade_ms:
self._fadein = fade_ms * self._data_rate
self._process = True
self._data_sum = 0
self._active.set(True)
def play(self, sound, loops=0, maxtime=0, fade_ms=0):
"""
Play sound on channel.
Argument sound to play, loops is repeat number or -1 for continuous,
maxtime is maximum play time, and fade_ms is fade-in time.
"""
if self._sound:
lv, rv = self._lvolume, self._rvolume
self.stop()
self.set_volume(lv, rv)
self._set_sound(sound)
self._loops = loops
if maxtime:
self._maxtime = int(maxtime * self._data_rate)
self._process = True
if fade_ms:
self._fadein = fade_ms * self._data_rate
self._process = True
self._data_sum = 0
self._active.set(True)
self._mixer._activate_channel(self._id)
return None
def _onended(self):
if not self._loops:
if not self._queue:
self.stop()
else:
self.play(self._queue)
else:
self._stream.close()
self._set_sound(self._sound)
self._loops -= 1
def stop(self):
"""
Stop sound on channel.
"""
if not self._active.get() and not self._pause:
return None
self._active.set(False)
self._mixer._deactivate_channel(self._id)
try:
self._stream.close()
self._stream = None
except AttributeError:
pass
self._sound = None
self._queue = None
self._pause = False
self._loops = 0
self._maxtime = 0
self._fadein = 0
self._fadeout = 0
self._volume = 1.0
self._lvolume = 1.0
self._rvolume = 1.0
self._process = False
self._mixer._restore_channel(self._id)
if self._endevent is not None:
env.event.post(self._endevent)
return None
def pause(self):
"""
Pause sound on channel.
"""
if self._active.get():
self._active.set(False)
self._pause = True
return None
def unpause(self):
"""
Unpause sound on channel.
"""
if self._pause:
self._active.set(True)
self._pause = False
return None
def fadeout(self, time):
"""
Stop sound after fade out time.
"""
if self._active.get() or self._pause:
self._fadeout = self._data_sum + (time * self._data_rate)
self._process = True
return None
def set_volume(self, volume, volume2=None):
"""
Set channel volume of sound playing.
        Argument volume of value 0.0 to 1.0; a single value sets both speakers, while a second value sets the stereo left/right speakers separately.
"""
if volume < 0.0:
volume = 0.0
elif volume > 1.0:
volume = 1.0
self._lvolume = volume
if volume2:
if volume2 < 0.0:
volume2 = 0.0
elif volume2 > 1.0:
volume2 = 1.0
self._rvolume = volume2
else:
self._rvolume = self._lvolume
self._volume = volume
return None
def get_volume(self):
"""
Get channel volume for current sound.
"""
return self._volume
def get_busy(self):
"""
Check if channel is processing sound.
"""
return self._active.get() or self._pause
def get_sound(self):
"""
Get sound open by channel.
"""
return self._sound
def queue(self, sound):
"""
Queue sound to play after current sound ends.
"""
if not self._sound:
self.play(sound)
else:
self._queue = sound
def get_queue(self):
"""
Return queue sound.
"""
return self._queue
def set_endevent(self, eventType=None):
"""
Set endevent for sound channel.
Argument eventType is event type (eg. USEREVENT+num).
Without an argument resets endevent to NOEVENT type.
"""
if eventType is not None:
if self._endevent is None or self._endevent.type != eventType:
self._endevent = env.event.Event(eventType)
else:
self._endevent = None
def get_endevent(self):
"""
Get endevent type for sound channel.
"""
if self._endevent is not None:
return self._endevent.type
else:
return Const.NOEVENT
class Music(object):
"""
**pyj2d.mixer.music**
* music.load
* music.unload
* music.play
* music.rewind
* music.stop
* music.pause
* music.unpause
* music.fadeout
* music.set_volume
* music.get_volume
* music.get_busy
* music.queue
* music.set_endevent
* music.get_endevent
"""
def __init__(self):
self._channel = Channel(-1)
self._sound = None
self._queue = None
self._volume = 1.0
def load(self, sound_file):
"""
Load music file.
"""
if self._channel.get_busy():
self._channel.stop()
self._sound = _SoundStream(sound_file)
return None
def unload(self):
"""
Unload music file.
"""
self._channel.stop()
self._sound = None
return None
def play(self, loops=0, maxtime=0, fade_ms=0):
"""
Play music.
Argument loops is repeat number or -1 for continuous,
maxtime is maximum play time, and fade_ms is fade-in time.
"""
self._channel.set_volume(self._volume)
self._channel.play(self._sound, loops, maxtime, fade_ms)
if self._queue:
self._channel.queue(self._queue)
self._sound = self._queue
self._queue = None
return None
def rewind(self):
"""
Rewind music.
"""
if self._channel.get_busy():
self._channel._reset_sound()
def stop(self):
"""
Stop music.
"""
self._channel.stop()
return None
def pause(self):
"""
Pause music.
"""
self._channel.pause()
return None
def unpause(self):
"""
Unpause music.
"""
self._channel.unpause()
return None
def fadeout(self, time):
"""
Stop music after fade out time.
"""
self._channel.fadeout(time)
return None
def set_volume(self, volume):
"""
Set music volume.
Argument volume of value 0.0 to 1.0.
"""
if volume < 0.0:
volume = 0.0
elif volume > 1.0:
volume = 1.0
self._volume = volume
if self._channel.get_busy():
self._channel.set_volume(volume)
return None
def get_volume(self):
"""
Get volume for current music.
"""
return self._volume
def get_busy(self):
"""
Check if music playing.
"""
return self._channel.get_busy()
def queue(self, sound_file):
"""
Queue sound to play after current sound ends.
"""
if not self._sound:
return None
if not self._channel.get_busy():
self._queue = _SoundStream(sound_file)
else:
self._sound = _SoundStream(sound_file)
self._channel.queue(self._sound)
def set_endevent(self, eventType=None):
"""
Set endevent for music channel.
Argument eventType is event type (eg. USEREVENT+num).
Without an argument resets endevent to NOEVENT type.
"""
self._channel.set_endevent(eventType)
return None
def get_endevent(self):
"""
Get endevent type for music channel.
"""
return self._channel.get_endevent()
| 29.645702
| 145
| 0.553426
|
605cc0c6ae2982a3410337326357ff6c42aff103
| 1,859
|
py
|
Python
|
trax/models/atari_cnn_test.py
|
Nishant-Pall/trax
|
f714a271111578d4e6b3ac445eef86ca03dc7fa6
|
[
"Apache-2.0"
] | 2
|
2020-08-08T14:38:53.000Z
|
2021-03-04T01:00:17.000Z
|
trax/models/atari_cnn_test.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | null | null | null |
trax/models/atari_cnn_test.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | 1
|
2020-11-01T11:22:54.000Z
|
2020-11-01T11:22:54.000Z
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for trax.models.atari_cnn."""
import functools
import operator as op
import numpy as np
from tensorflow import test
from trax.models import atari_cnn
from trax.shapes import ShapeDtype
class AtariCnnTest(test.TestCase):
def test_computes(self):
hidden_size = (4, 4)
output_size = 6
model = atari_cnn.AtariCnn(
hidden_sizes=hidden_size, output_size=output_size)
B, T, OBS = 2, 2, (28, 28, 3) # pylint: disable=invalid-name
input_signature = ShapeDtype((1, 1) + OBS)
_, _ = model.init(input_signature)
x = np.arange(B * (T + 1) * functools.reduce(op.mul, OBS)).reshape(
B, T + 1, *OBS)
y = model(x)
self.assertEqual((B, T + 1, output_size), y.shape)
class FrameStackMLPTest(test.TestCase):
def test_computes(self):
hidden_size = (4, 4)
output_size = 6
model = atari_cnn.FrameStackMLP(
hidden_sizes=hidden_size, output_size=output_size)
B, T, OBS = 2, 2, 3 # pylint: disable=invalid-name
input_signature = ShapeDtype((1, 1, OBS))
_, _ = model.init(input_signature)
x = np.arange(B * (T + 1) * OBS).reshape(B, T + 1, OBS)
y = model(x)
self.assertEqual((B, T + 1, output_size), y.shape)
if __name__ == '__main__':
test.main()
| 30.983333
| 74
| 0.689618
|
9ac75e630bf7a078dd21528674200b5c7f314b78
| 20,278
|
py
|
Python
|
front-end/testsuite-python-lib/Python-2.3/Lib/test/test_strptime.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
front-end/testsuite-python-lib/Python-2.3/Lib/test/test_strptime.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | null | null | null |
front-end/testsuite-python-lib/Python-2.3/Lib/test/test_strptime.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
"""PyUnit testing against strptime"""
import unittest
import time
import locale
import re
from test import test_support
import _strptime
class getlang_Tests(unittest.TestCase):
"""Test _getlang"""
def test_basic(self):
self.failUnlessEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
"""Tests for _strptime.LocaleTime."""
def setUp(self):
"""Create time tuple based on current time."""
self.time_tuple = time.localtime()
self.LT_ins = _strptime.LocaleTime()
def compare_against_time(self, testing, directive, tuple_position,
error_msg):
"""Helper method that tests testing against directive based on the
tuple_position of time_tuple. Uses error_msg as error message.
"""
strftime_output = time.strftime(directive, self.time_tuple)
comparison = testing[self.time_tuple[tuple_position]]
self.failUnless(strftime_output in testing, "%s: not found in tuple" %
error_msg)
self.failUnless(comparison == strftime_output,
"%s: position within tuple incorrect; %s != %s" %
(error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
# both string and position with tuple
self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
"Testing of full weekday name failed")
self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
"Testing of abbreviated weekday name failed")
def test_month(self):
# Test full and abbreviated month names; both string and position
# within the tuple
self.compare_against_time(self.LT_ins.f_month, '%B', 1,
"Testing against full month name failed")
self.compare_against_time(self.LT_ins.a_month, '%b', 1,
"Testing against abbreviated month name failed")
def test_am_pm(self):
# Make sure AM/PM representation done properly
strftime_output = time.strftime("%p", self.time_tuple)
self.failUnless(strftime_output in self.LT_ins.am_pm,
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
self.failUnless(strftime_output == self.LT_ins.am_pm[position],
"AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
timezone = time.strftime("%Z", self.time_tuple)
if timezone:
self.failUnless(timezone in self.LT_ins.timezone,
"timezone %s not found in %s" %
(timezone, self.LT_ins.timezone))
def test_date_time(self):
# Check that LC_date_time, LC_date, and LC_time are correct
# the magic date is used so as to not have issues with %c when day of
# the month is a single digit and has a leading space. This is not an
# issue since strptime still parses it correctly. The problem is
# testing these directives for correctness by comparing strftime
# output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_date_time,
magic_date),
"LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_date,
magic_date),
"LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_time,
magic_date),
"LC_time incorrect")
LT = _strptime.LocaleTime(am_pm=('',''))
self.failUnless(LT.LC_time, "LocaleTime's LC directives cannot handle "
"empty strings")
def test_lang(self):
# Make sure lang is set to what _getlang() returns
# Assuming locale has not changed between now and when self.LT_ins was created
self.failUnlessEqual(self.LT_ins.lang, _strptime._getlang())
def test_by_hand_input(self):
# Test passed-in initialization value checks
self.failUnless(_strptime.LocaleTime(f_weekday=range(7)),
"Argument size check for f_weekday failed")
self.assertRaises(TypeError, _strptime.LocaleTime, f_weekday=range(8))
self.assertRaises(TypeError, _strptime.LocaleTime, f_weekday=range(6))
self.failUnless(_strptime.LocaleTime(a_weekday=range(7)),
"Argument size check for a_weekday failed")
self.assertRaises(TypeError, _strptime.LocaleTime, a_weekday=range(8))
self.assertRaises(TypeError, _strptime.LocaleTime, a_weekday=range(6))
self.failUnless(_strptime.LocaleTime(f_month=range(12)),
"Argument size check for f_month failed")
self.assertRaises(TypeError, _strptime.LocaleTime, f_month=range(11))
self.assertRaises(TypeError, _strptime.LocaleTime, f_month=range(13))
self.failUnless(len(_strptime.LocaleTime(f_month=range(12)).f_month) == 13,
"dummy value for f_month not added")
self.failUnless(_strptime.LocaleTime(a_month=range(12)),
"Argument size check for a_month failed")
self.assertRaises(TypeError, _strptime.LocaleTime, a_month=range(11))
self.assertRaises(TypeError, _strptime.LocaleTime, a_month=range(13))
self.failUnless(len(_strptime.LocaleTime(a_month=range(12)).a_month) == 13,
"dummy value for a_month not added")
self.failUnless(_strptime.LocaleTime(am_pm=range(2)),
"Argument size check for am_pm failed")
self.assertRaises(TypeError, _strptime.LocaleTime, am_pm=range(1))
self.assertRaises(TypeError, _strptime.LocaleTime, am_pm=range(3))
self.failUnless(_strptime.LocaleTime(timezone=range(2)),
"Argument size check for timezone failed")
self.assertRaises(TypeError, _strptime.LocaleTime, timezone=range(1))
self.assertRaises(TypeError, _strptime.LocaleTime, timezone=range(3))
def test_unknowntimezone(self):
# Handle timezone set to ('','') properly.
# Fixes bug #661354
locale_time = _strptime.LocaleTime(timezone=('',''))
self.failUnless("%Z" not in locale_time.LC_date,
"when timezone == ('',''), string.replace('','%Z') is "
"occuring")
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
def test_getitem(self):
# Make sure that __getitem__ works properly
self.failUnless(self.time_re['m'],
"Fetching 'm' directive (built-in) failed")
self.failUnless(self.time_re['b'],
"Fetching 'b' directive (built w/ __tupleToRE) failed")
for name in self.locale_time.a_month:
self.failUnless(self.time_re['b'].find(name) != -1,
"Not all abbreviated month names in regex")
self.failUnless(self.time_re['c'],
"Fetching 'c' directive (built w/ format) failed")
self.failUnless(self.time_re['c'].find('%') == -1,
"Conversion of 'c' directive failed; '%' found")
self.assertRaises(KeyError, self.time_re.__getitem__, '1')
def test_pattern(self):
# Test TimeRE.pattern
pattern_string = self.time_re.pattern(r"%a %A %d")
self.failUnless(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
"did not find abbreviated weekday in pattern string '%s'" %
pattern_string)
self.failUnless(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
"did not find full weekday in pattern string '%s'" %
pattern_string)
self.failUnless(pattern_string.find(self.time_re['d']) != -1,
"did not find 'd' directive pattern string '%s'" %
pattern_string)
def test_pattern_escaping(self):
# Make sure any characters in the format string that might be taken as
# regex syntax is escaped.
pattern_string = self.time_re.pattern("\d+")
self.failUnless(r"\\d\+" in pattern_string,
"%s does not have re characters escaped properly" %
pattern_string)
def test_compile(self):
# Check that compiled regex is correct
found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
self.failUnless(found and found.group('A') == self.locale_time.f_weekday[6],
"re object for '%A' failed")
compiled = self.time_re.compile(r"%a %b")
found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4]))
self.failUnless(found,
"Match failed with '%s' regex and '%s' string" %
(compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4])))
self.failUnless(found.group('a') == self.locale_time.a_weekday[4] and
found.group('b') == self.locale_time.a_month[4],
"re object couldn't find the abbreviated weekday month in "
"'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
(found.string, found.re.pattern, found.group('a'),
found.group('b')))
for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
'U','w','W','x','X','y','Y','Z','%'):
compiled = self.time_re.compile("%" + directive)
found = compiled.match(time.strftime("%" + directive))
self.failUnless(found, "Matching failed on '%s' using '%s' regex" %
(time.strftime("%" + directive),
compiled.pattern))
def test_blankpattern(self):
# Make sure when tuple or something has no values no regex is generated.
# Fixes bug #661354
test_locale = _strptime.LocaleTime(timezone=('',''))
self.failUnless(_strptime.TimeRE(test_locale).pattern("%Z") == '',
"with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
compiled_re = self.time_re.compile("\w+ %m")
found = compiled_re.match("\w+ 10")
self.failUnless(found, "Escaping failed of format '\w+ 10'")
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_TypeError(self):
# Make sure ValueError is raised when match fails
self.assertRaises(ValueError, _strptime.strptime, data_string="%d",
format="%A")
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime.strptime, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime.strptime(strf_output, "%" + directive)
self.failUnless(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime.strptime(bound, '%y')
expected_result = century + int(bound)
self.failUnless(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime.strptime(strf_output, "%I %p")
self.failUnless(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
# When gmtime() is used with %Z, entire result of strftime() is empty.
# Check for equal timezone names deals with bad locale info when this
# occurs; first found in FreeBSD 4.4.
strp_output = _strptime.strptime("UTC", "%Z")
self.failUnlessEqual(strp_output.tm_isdst, 0)
strp_output = _strptime.strptime("GMT", "%Z")
self.failUnlessEqual(strp_output.tm_isdst, 0)
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime.strptime(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.failUnless(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.failUnless(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_date_time(self):
# Test %c directive
for position in range(6):
self.helper('c', position)
def test_date(self):
# Test %x directive
for position in range(0,3):
self.helper('x', position)
def test_time(self):
# Test %X directive
for position in range(3,6):
self.helper('X', position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime.strptime(strf_output, "%m %% %Y")
self.failUnless(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.failUnless(_strptime.strptime(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.failUnless(_strptime.strptime(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.failUnless(_strptime.strptime(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
# Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, -1)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime.strptime('1', '%m')
self.failUnless(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
def test_twelve_noon_midnight(self):
eq = self.assertEqual
eq(time.strptime('12 PM', '%I %p')[3], 12)
eq(time.strptime('12 AM', '%I %p')[3], 0)
eq(_strptime.strptime('12 PM', '%I %p')[3], 12)
eq(_strptime.strptime('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
"""Test a _strptime regression that all julian (1-366) are accepted"""
def test_all_julian_days(self):
eq = self.assertEqual
for i in range(1, 367):
# use 2004, since it is a leap year, we have 366 days
eq(_strptime.strptime('%d 2004' % i, '%j %Y')[7], i)
class CalculationTests(unittest.TestCase):
"""Test that strptime() fills in missing info correctly"""
def setUp(self):
self.time_tuple = time.gmtime()
def test_julian_calculation(self):
# Make sure that when Julian is missing that it is calculated
format_string = "%Y %m %d %H %M %S %w %Z"
result = _strptime.strptime(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_yday == self.time_tuple.tm_yday,
"Calculation of tm_yday failed; %s != %s" %
(result.tm_yday, self.time_tuple.tm_yday))
def test_gregorian_calculation(self):
# Test that Gregorian date can be calculated from Julian day
format_string = "%Y %H %M %S %w %j %Z"
result = _strptime.strptime(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
"Calculation of Gregorian date failed;"
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
self.time_tuple.tm_mday))
def test_day_of_week_calculation(self):
# Test that the day of the week is calculated as needed
format_string = "%Y %m %d %H %S %j %Z"
result = _strptime.strptime(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_wday == self.time_tuple.tm_wday,
"Calculation of day of the week failed;"
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_main():
test_support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
)
if __name__ == '__main__':
test_main()
| 47.15814
| 86
| 0.580827
|
5a75a9aa1ead7d4cf4dfe4d3489d0f3273583f39
| 7,791
|
py
|
Python
|
komics/dprc_qualcheck.py
|
FreBio/komics
|
3af2d968f7864d5f3767983b236660f268f8d0c0
|
[
"OLDAP-2.2.1"
] | 2
|
2021-10-04T14:10:32.000Z
|
2021-11-10T11:59:39.000Z
|
komics/dprc_qualcheck.py
|
FreBio/komics
|
3af2d968f7864d5f3767983b236660f268f8d0c0
|
[
"OLDAP-2.2.1"
] | 2
|
2021-10-20T11:21:22.000Z
|
2022-03-29T13:47:28.000Z
|
komics/dprc_qualcheck.py
|
FreBio/komics
|
3af2d968f7864d5f3767983b236660f268f8d0c0
|
[
"OLDAP-2.2.1"
] | 1
|
2020-06-11T10:05:32.000Z
|
2020-06-11T10:05:32.000Z
|
'''
TODO:
* consider removing the extension of minicircles if they already start with CSB1?
* include coverage plots pdf
'''
from __future__ import division
import os
import re
import sys
import pysam
import numpy
import subprocess
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
class Error (Exception): pass
class Tests:
def __init__(self,
out,
fasta,
reads1,
reads2,
threads,
cigar,
):
self.out = out
self.input_contigs = os.path.abspath(fasta)
self.reads1 = os.path.abspath(reads1)
self.reads2 = os.path.abspath(reads2)
self.threads = threads
self.reffile = 'tmp.' + self.out + ".extended.fasta"
self.indexfile = str(self.reffile) + ".k8s2"
self.contigstats = self.out + ".contigstats.txt"
self.overalstats = self.out + ".overalstats.txt"
self.sam = 'tmp.' + self.out + ".sam"
self.bam = 'tmp.' + self.out + ".bam"
self.CSB3 = 'GGGGTTGGTGT|ACACCAACCCC|GGGGTTGATGT|ACATCAACCCC'
# for internal use only
self.extend = int(cigar)
self.samflags = [81, 83, 161, 163, 97, 99, 145, 147]
self.MQ = 20
self.cigar = str(cigar) + 'M'
if not os.path.exists(self.input_contigs):
sys.stderr.write('\nERROR: contigs file not found: "' + self.input_contigs + '"\n')
sys.exit(0)
if not os.path.exists(self.reads1):
sys.stderr.write('\nERROR: reads1 file not found: "' + self.reads1 + '"\n')
sys.exit(0)
if not os.path.exists(self.reads2):
sys.stderr.write('\nERROR: reads2 file not found: "' + self.reads2 + '"\n')
sys.exit(0)
def _rev_comp(self, seq):
comp = {"A": "T", "C": "G", "G": "C", "T": "A", "N": "N"}
rc = "".join([comp[x] for x in seq[::-1]])
return rc
def extend_fasta(self):
tmp_result=[]
for minicircle in SeqIO.parse(self.input_contigs, "fasta"):
if re.search('circular', minicircle.id):
newseq = minicircle.seq[(len(minicircle.seq)-self.extend):] + minicircle.seq
tmp_result.append(SeqRecord(newseq, id=minicircle.id, description=""))
else:
tmp_result.append(SeqRecord(minicircle.seq, id=minicircle.id, description=""))
SeqIO.write(tmp_result, self.reffile, "fasta")
def smalt_run(self):
smalt_index_command = [
"smalt index",
"-k 8",
"-s 2",
self.indexfile,
self.reffile
]
sys.stderr.write('Creating smalt index file:\n')
subprocess.call(' '.join(smalt_index_command), shell = True)
smalt_map_command = [
"smalt map",
"-i", str(1500),
"-y", str(0.95),
"-x",
"-r", str(0),
"-n", str(self.threads),
"-o", self.sam,
self.indexfile,
self.reads1,
self.reads2
]
sys.stderr.write('\n\nRunning smalt map:\n')
subprocess.call(' '.join(smalt_map_command), shell = True)
pysam.sort("-o", self.bam, self.sam)
pysam.index(self.bam)
def read_stats(self):
N=0
N_mapped=0
N_properpair=0
N_MQ20=0
N_CSB3=0
N_CSB3_mapped=0
N_CSB3_mapped_pm=0
N_CSB3_mapped_pp=0
samfile=pysam.AlignmentFile(self.bam, "rb")
outfile=samfile.header['SQ']
for ref in list(range(0,len(outfile))):
outfile[ref]['Nmapped']=0
outfile[ref]['NMQ20']=0
outfile[ref]['Nproperpair']=0
outfile[ref]['NCSB3']=0
outfile[ref]['NCSB3pm']=0
outfile[ref]['NCSB3pp']=0
sys.stderr.write('\n\nEstimating read counts.\n')
for read in samfile.fetch(until_eof=True):
N = N+1
if re.findall(self.CSB3, str(read.seq)):
N_CSB3 = N_CSB3 + 1
if not read.is_unmapped:
N_mapped = N_mapped+1
outfile[read.reference_id]['Nmapped'] = outfile[read.reference_id]['Nmapped']+1
if read.mapping_quality >= self.MQ:
N_MQ20 = N_MQ20+1
outfile[read.reference_id]['NMQ20'] = outfile[read.reference_id]['NMQ20']+1
if read.flag in self.samflags:
N_properpair = N_properpair+1
outfile[read.reference_id]['Nproperpair'] = outfile[read.reference_id]['Nproperpair']+1
if re.findall(self.CSB3, str(read.seq)):
N_CSB3_mapped = N_CSB3_mapped+1
outfile[read.reference_id]['NCSB3'] = outfile[read.reference_id]['NCSB3']+1
if re.findall(self.CSB3, str(read.seq)) and read.cigarstring == self.cigar:
N_CSB3_mapped_pm = N_CSB3_mapped_pm+1
outfile[read.reference_id]['NCSB3pm'] = outfile[read.reference_id]['NCSB3pm']+1
if re.findall(self.CSB3, str(read.seq)) and read.flag in self.samflags:
N_CSB3_mapped_pp = N_CSB3_mapped_pp+1
outfile[read.reference_id]['NCSB3pp'] = outfile[read.reference_id]['NCSB3pp']+1
samfile.close()
samfile=pysam.AlignmentFile(self.bam, "rb")
with open(self.overalstats, 'w') as f:
f.write("Number of reads: %s\n" % N)
f.write("Number of mapped reads: %s\n" % N_mapped)
f.write("Number of reads w/ MQ>20: %s\n" % N_MQ20)
f.write("Number of proper pairs: %s\n" % N_properpair)
f.write("Number of CSB3 reads: %s\n" % N_CSB3)
f.write("Number of mapped CSB3 reads: %s\n" % N_CSB3_mapped)
f.write("Number of perfectly matched CSB3 reads: %s\n" % N_CSB3_mapped_pm)
f.write("Number of proper paired CSB3 reads: %s\n" % N_CSB3_mapped_pp)
sys.stderr.write('Estimating read depths.\n')
for contig in list(range(0, len(outfile))):
start_pos = 0
# break_pos = outfile[contig]['LN']
Y = list()
# Z = 0
for pileup in samfile.pileup(outfile[contig]['SN'], max_depth = 5000000):
while pileup.pos != start_pos:
Y.append(0)
start_pos = start_pos+1
# if pileup.pos == break_pos:
# for pread in pileup.pileups:
# if pread.alignment.cigarstring == self.cigar:
# Z=Z+1
#pread.alignment.reference_start, pread.alignment.reference_end
Y.append(pileup.n)
start_pos = start_pos+1
try:
outfile[contig]['meandepth'] = numpy.mean(Y)
outfile[contig]['mediandepth'] = numpy.median(Y)
outfile[contig]['mindepth'] = numpy.min(Y)
outfile[contig]['maxdepth'] = numpy.max(Y)
# outfile[contig]['breakdepth'] = Z
except ValueError:
sys.stderr.write('WARNING: contig %s has zero read depth.\n' % (outfile[contig]['SN']))
samfile.close()
with open(self.contigstats, 'w') as f:
f.write("SN, LN, Nmapped, Nmq20, Npp, NCSB3, NCSB3pm, NCSB3pp, meandepth, mediandepth, mindepth, maxdepth\n")
for item in outfile:
try:
f.write("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" % ( \
item['SN'], \
item['LN'], \
item['Nmapped'], \
item['NMQ20'], \
item['Nproperpair'], \
item['NCSB3'], \
item['NCSB3pm'], \
item['NCSB3pp'], \
round(item['meandepth']), \
item['mediandepth'], \
item['mindepth'], \
item['maxdepth'])
)
except KeyError:
sys.stderr.write('WARNING: contig %s has zero read depth and was not included in output.\n' % item['SN'])
sys.stderr.write('Overall read counts were written to %s\n' % (os.path.abspath(self.overalstats)))
sys.stderr.write('Contig read counts and depths were written to %s\n' % (os.path.abspath(self.contigstats)))
def run(self):
sys.stderr.write('\nEstimating minicircle read depths and counts\n')
sys.stderr.write('============================================\n')
self.extend_fasta()
self.smalt_run()
self.read_stats()
sys.stderr.write('\nkomics qualcheck successfully completed.\n\n')
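# --- Illustrative usage sketch (not part of the original module) -------------
# Assuming the FASTA/FASTQ files named below exist, the quality check would be
# driven like this; the file names, thread count and read length (cigar) are
# assumptions made for this example only.
if __name__ == '__main__':
    qc = Tests(out='sampleA',
               fasta='sampleA.minicircles.fasta',
               reads1='sampleA_1.fastq',
               reads2='sampleA_2.fastq',
               threads=4,
               cigar=150)  # read length; also defines the perfect-match CIGAR '150M'
    qc.run()  # extend_fasta() -> smalt_run() -> read_stats()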
| 34.021834
| 115
| 0.594661
|
bae27664ab59d8892bbfb9c3a6c7935c2179cc1a
| 3,515
|
py
|
Python
|
code/Experiments/Tutorials/nn-from-scratch/StanfordNN_GPU.py
|
matthijsvk/convNets
|
7e65db7857a4e6abfbcab264953eb7741319de6c
|
[
"Apache-2.0"
] | 53
|
2017-04-18T10:06:20.000Z
|
2021-12-29T21:26:07.000Z
|
code/Experiments/Tutorials/nn-from-scratch/StanfordNN_GPU.py
|
matthijsvk/convNets
|
7e65db7857a4e6abfbcab264953eb7741319de6c
|
[
"Apache-2.0"
] | null | null | null |
code/Experiments/Tutorials/nn-from-scratch/StanfordNN_GPU.py
|
matthijsvk/convNets
|
7e65db7857a4e6abfbcab264953eb7741319de6c
|
[
"Apache-2.0"
] | 20
|
2017-05-03T03:27:09.000Z
|
2022-03-24T07:07:45.000Z
|
import numpy as np
import sklearn
import sklearn.datasets
import theano
import theano.tensor as T
import time
# Use float32 as the default float data type
theano.config.floatX = 'float32'
# Generate a dataset
np.random.seed(0)
train_X, train_y = sklearn.datasets.make_moons(5000, noise=0.20)
train_y_onehot = np.eye(2)[train_y]
# Size definitions
num_examples = len(train_X) # training set size
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
nn_hdim = 1000 # hiden layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = np.float32(0.01) # learning rate for gradient descent
reg_lambda = np.float32(0.01) # regularization strength
# GPU NOTE: Conversion to float32 to store them on the GPU!
X = theano.shared(train_X.astype('float32')) # initialized on the GPU
y = theano.shared(train_y_onehot.astype('float32'))
# GPU NOTE: Conversion to float32 to store them on the GPU!
W1 = theano.shared(np.random.randn(nn_input_dim, nn_hdim).astype('float32'), name='W1')
b1 = theano.shared(np.zeros(nn_hdim).astype('float32'), name='b1')
W2 = theano.shared(np.random.randn(nn_hdim, nn_output_dim).astype('float32'), name='W2')
b2 = theano.shared(np.zeros(nn_output_dim).astype('float32'), name='b2')
# Forward propagation
z1 = X.dot(W1) + b1
a1 = T.tanh(z1)
z2 = a1.dot(W2) + b2
y_hat = T.nnet.softmax(z2)
# The regularization term (optional)
loss_reg = 1. / num_examples * reg_lambda / 2 * (T.sum(T.sqr(W1)) + T.sum(T.sqr(W2)))
# the loss function we want to optimize
loss = T.nnet.categorical_crossentropy(y_hat, y).mean() + loss_reg
# Returns a class prediction
prediction = T.argmax(y_hat, axis=1)
# Gradients
dW2 = T.grad(loss, W2)
db2 = T.grad(loss, b2)
dW1 = T.grad(loss, W1)
db1 = T.grad(loss, b1)
# Note that we removed the input values because we will always use the same shared variable
# GPU NOTE: Removed the input values to avoid copying data to the GPU.
forward_prop = theano.function([], y_hat)
calculate_loss = theano.function([], loss)
predict = theano.function([], prediction)
# GPU NOTE: Removed the input values to avoid copying data to the GPU.
gradient_step = theano.function(
[],
# profile=True,
updates=((W2, W2 - epsilon * dW2),
(W1, W1 - epsilon * dW1),
(b2, b2 - epsilon * db2),
(b1, b1 - epsilon * db1)))
def build_model(num_passes=20000, print_loss=False):
# Re-Initialize the parameters to random values. We need to learn these.
np.random.seed(0)
# GPU NOTE: Conversion to float32 to store them on the GPU!
W1.set_value((np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)).astype('float32'))
b1.set_value(np.zeros(nn_hdim).astype('float32'))
W2.set_value((np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)).astype('float32'))
b2.set_value(np.zeros(nn_output_dim).astype('float32'))
# Gradient descent. For each batch...
for i in xrange(0, num_passes):
# This will update our parameters W2, b2, W1 and b1!
gradient_step()
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print "Loss after iteration %i: %f" % (i, calculate_loss())
# Profiling
# theano.config.profile = True
# theano.config.profile_memory = True
# gradient_step()
# theano.printing.debugprint(gradient_step)
# print gradient_step.profile.summary()
gradient_step()
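# --- Illustrative training run (added for this example, not in the original) --
# build_model() retrains from scratch using the shared GPU variables defined
# above; 1000 passes here is an arbitrary assumption to keep the sketch quick.
build_model(num_passes=1000, print_loss=True)
accuracy = np.mean(predict() == train_y)  # predict() reads the shared X already on the GPU
print("Training accuracy: %f" % accuracy)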
| 36.237113
| 100
| 0.707824
|
f69a7326c9d2e8d0f6f5df2b5cab52a9558a0f6f
| 3,817
|
py
|
Python
|
tests/test_contacts.py
|
BasementCat/earful
|
a796ae3246f0d3b773be227bacf4260b92d2fa09
|
[
"MIT"
] | null | null | null |
tests/test_contacts.py
|
BasementCat/earful
|
a796ae3246f0d3b773be227bacf4260b92d2fa09
|
[
"MIT"
] | 1
|
2016-02-06T21:12:28.000Z
|
2016-02-06T21:12:28.000Z
|
tests/test_contacts.py
|
BasementCat/earful
|
a796ae3246f0d3b773be227bacf4260b92d2fa09
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from earful.contacts import (
EmailAddress,
PhoneNumber,
HipChat,
Recipient,
Group,
)
class ContactInformationTest(TestCase):
def test_hipchat_defaults(self):
instance = HipChat('contactname', 'roomname')
self.assertEqual(instance.name, 'contactname')
self.assertEqual(instance.weight, 100)
self.assertEqual(instance.room, 'roomname')
self.assertTrue(instance.notify)
self.assertFalse(instance.mention)
def test_hipchat_withuser(self):
instance = HipChat('contactname', 'roomname', username='person')
self.assertFalse(instance.notify)
self.assertTrue(instance.mention)
def test_hipchat_setprefs(self):
instance = HipChat('contactname', 'roomname', username='person', notify=True, mention=False)
self.assertTrue(instance.notify)
self.assertFalse(instance.mention)
class RecipientTest(TestCase):
def test_recipient_defaults(self):
r = Recipient('recipientname')
self.assertEqual(list(r.contacts()), [])
def test_simple_recipient(self):
c = [EmailAddress('emailname', 'emailaddr')]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts()), c)
def test_less_simple_recipient(self):
c = [
EmailAddress('emailname', 'emailaddr'),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts()), c)
def test_contacts_by_type(self):
c = [
EmailAddress('emailname', 'emailaddr'),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(of_type=EmailAddress)), [c[0]])
def test_contacts_with_weight(self):
c = [
EmailAddress('emailname', 'emailaddr'),
EmailAddress('emailname', 'emailaddr', weight=50),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts()), c[1:])
def test_contacts_with_weight_all(self):
c = [
EmailAddress('emailname', 'emailaddr'),
EmailAddress('emailname', 'emailaddr', weight=50),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(include_all=True)), [c[1], c[0], c[2]])
def test_contacts_with_weight_type(self):
c = [
EmailAddress('emailname', 'emailaddr'),
EmailAddress('emailname', 'emailaddr', weight=50),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(of_type=EmailAddress)), [c[1]])
def test_contacts_having(self):
c = [
PhoneNumber('phonename', 'phonenum', sms_ok=False),
PhoneNumber('phonename', 'phonenum', sms_ok=True),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(sms_ok=True)), [c[1]])
class GroupTest(TestCase):
def test_groups(self):
t = EmailAddress('emailname', 'emailaddr')
r = Recipient('recipientname', contacts=[t])
c = Group('c', recipients=[r])
b = Group('b', groups=[c])
a = Group('a', groups=[b])
self.assertEqual(list(a.groups(recursive=False)), [b])
self.assertEqual(list(a.recipients(recursive=False)), [])
self.assertEqual(list(a.contacts(recursive=False)), [])
self.assertEqual(list(a.groups()), [b, c])
self.assertEqual(list(a.recipients()), [r])
self.assertEqual(list(a.contacts()), [t])
| 35.018349
| 100
| 0.616453
|
fc91368049e857c87ae57e01ccb22ce62da3111a
| 1,058
|
py
|
Python
|
hc/front/tests/test_add_victorops.py
|
sumonst21/healthchecks
|
967ca840adee6c72addde46c944c88b1bd5484e2
|
[
"BSD-3-Clause"
] | 4
|
2021-03-27T09:40:00.000Z
|
2021-03-28T06:11:03.000Z
|
hc/front/tests/test_add_victorops.py
|
sumonst21/healthchecks
|
967ca840adee6c72addde46c944c88b1bd5484e2
|
[
"BSD-3-Clause"
] | 7
|
2020-06-05T23:16:36.000Z
|
2022-02-10T08:33:36.000Z
|
hc/front/tests/test_add_victorops.py
|
sumonst21/healthchecks
|
967ca840adee6c72addde46c944c88b1bd5484e2
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T13:36:14.000Z
|
2021-01-29T13:36:14.000Z
|
from hc.api.models import Channel
from hc.test import BaseTestCase
class AddVictorOpsTestCase(BaseTestCase):
url = "/integrations/add_victorops/"
def test_instructions_work(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertContains(r, "incident management system")
def test_it_works(self):
form = {"value": "http://example.org"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, form)
self.assertRedirects(r, "/integrations/")
c = Channel.objects.get()
self.assertEqual(c.kind, "victorops")
self.assertEqual(c.value, "http://example.org")
self.assertEqual(c.project, self.project)
def test_it_rejects_bad_url(self):
form = {"value": "not an URL"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, form)
self.assertContains(r, "Enter a valid URL")
| 34.129032
| 76
| 0.660681
|
b8fab35087a41fec41e2c8eb9f8d38642dae355f
| 6,378
|
py
|
Python
|
evaluation/custom_aristo_mini_solvers/basic_csk_solver.py
|
Aunsiels/CSK
|
c88609bc76d865b4987aaf30ddf1247a2031b1a6
|
[
"MIT"
] | 16
|
2019-11-28T13:26:37.000Z
|
2022-02-09T09:53:10.000Z
|
evaluation/custom_aristo_mini_solvers/basic_csk_solver.py
|
Aunsiels/CSK
|
c88609bc76d865b4987aaf30ddf1247a2031b1a6
|
[
"MIT"
] | 1
|
2021-03-26T20:31:48.000Z
|
2021-07-15T08:52:47.000Z
|
evaluation/custom_aristo_mini_solvers/basic_csk_solver.py
|
Aunsiels/CSK
|
c88609bc76d865b4987aaf30ddf1247a2031b1a6
|
[
"MIT"
] | 3
|
2020-08-14T23:23:25.000Z
|
2021-12-24T14:02:35.000Z
|
"""
This is a skeleton for building your own solver.
You just need to find and fix the two TODOs in this file.
"""
from typing import List
import re
import math
import nltk
import spacy
from wordfreq import word_frequency
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
from quasimodo.spacy_accessor import SpacyAccessor
nlp = spacy.load('en_core_web_sm', disable=["tagger", "parser", "ner"])
class BasicCSKSolver(SolverBase):
def __init__(self):
self.name = "Basic CSK Solver"
self.subject_to_objects = None
self.object_to_subjects = None
self.spacy_accessor = SpacyAccessor()
def solver_info(self) -> str:
return self.name
def get_frequency(self, sentence):
words = sentence.split(" ")
freq = 1.0
for word in words:
freq *= word_frequency(word, "en")
if freq == 0:
freq = 1
return freq
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
# pylint: disable=unused-variable
question_text = question.stem.lower()
choices = question.choices
choice_texts = [x.text for x in choices]
question_text = " ".join(self.spacy_accessor.lemmatize(question_text))
confidences = self.compute_confidences_method1(question_text, choice_texts)
return MultipleChoiceAnswer(
[ChoiceConfidence(choice, confidence)
for choice, confidence in zip(choices, confidences)]
)
def compute_confidences_method2(self, question_text, choices):
confidences: List[float] = []
associations_question = self.get_subject_associated_words(question_text)
for choice in choices:
choice_text = " ".join(self.spacy_accessor.lemmatize(choice))
associations_choice = self.get_subject_associated_words(choice_text)
confidences.append(self.compare_two_associations(associations_question, associations_choice))
return confidences
def compute_confidences_method1(self, question_text, choices):
confidences: List[float] = []
# This frequency counts how often each sub-part occurs across the choices,
# so shared (non-distinctive) sub-parts can be downweighted below
w_freq = self.get_frequency_sequences_in_choices(choices)
for choice in choices:
confidence = self.compute_confident_choice_method1(question_text, choice, w_freq)
confidences.append(confidence)
return confidences
def compute_confident_choice_method1(self, question_text, choice, w_freq):
choice_text = " ".join(self.spacy_accessor.lemmatize(choice))
propositions = choice_text.lower().split(" ")
propositions_sub_parts = []
for i in range(len(propositions)):
for j in range(i + 1, len(propositions) + 1):
propositions_sub_parts.append(" ".join(propositions[i:j]))
confidence = 0
for subpart in propositions_sub_parts:
if len(subpart) <= 3:
continue
association_score_pairs = self.subject_to_objects.get(subpart, []).copy()
association_score_pairs += self.object_to_subjects.get(subpart, [])
confidence_temp = self.get_confidence_associations_for_text(association_score_pairs, question_text)
confidence_temp /= w_freq.get(subpart, 1.0)
confidence += confidence_temp
return confidence
def get_subject_associated_words(self, sentence):
association_score_pairs = dict()
maxi = 0.01
n_subjects = 0
for subject in self.subject_to_objects:
if subject in sentence:
n_subjects += 1
for association, score in self.subject_to_objects[subject]:
association_score_pairs[association] = association_score_pairs.get(association, 0.0) + math.exp(score)
maxi = max(maxi, association_score_pairs[association])
for obj in self.object_to_subjects:
if obj in sentence:
n_subjects += 1
for association, score in self.object_to_subjects[obj]:
association_score_pairs[association] = association_score_pairs.get(association, 0.0) + math.exp(score)
maxi = max(maxi, association_score_pairs[association])
if n_subjects != 0:
for association in association_score_pairs:
association_score_pairs[association] /= n_subjects
tokens = nltk.word_tokenize(sentence)
for i in range(len(tokens)):
for j in range(i + 1, len(tokens) + 1):
word = " ".join(tokens[i:j])
association_score_pairs[word] = association_score_pairs.get(word, 0.0) + 10.0 * maxi
return association_score_pairs
def compare_two_associations(self, association_score_pairs0, association_score_pairs1):
score = 0
keys0 = set(association_score_pairs0.keys())
keys1 = set(association_score_pairs1.keys())
final_keys = keys0.intersection(keys1)
for key in final_keys:
score0 = association_score_pairs0[key]
score1 = association_score_pairs1[key]
score += score0 * score1 * key.count(" ")
return score
def get_confidence_associations_for_text(self, association_score_pairs, question_text):
confidence_temp = 0
done = set()
for (association, score) in association_score_pairs:
if re.search("[^0-9a-zA-Z]" + re.escape(association) +
"[^0-9a-zA-Z]", question_text) is not None \
and association not in done:
done.add(association)
confidence_temp += score * 1.0 / self.get_frequency(association)
return confidence_temp
def get_frequency_sequences_in_choices(self, choices):
w_freq = dict()
for choice in choices:
propositions = self.spacy_accessor.lemmatize(choice.lower())
for i in range(len(propositions)):
for j in range(i + 1, len(propositions) + 1):
sub_part_choice = " ".join(propositions[i:j])
w_freq[sub_part_choice] = w_freq.setdefault(sub_part_choice, 0) + 1
return w_freq
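# --- Illustration only (not used by the solver) --------------------------------
# The contiguous "sub-part" enumeration used in compute_confident_choice_method1
# and get_frequency_sequences_in_choices, shown standalone on a toy token list.
def _enumerate_sub_parts(tokens):
    """Return every contiguous token span joined by spaces."""
    sub_parts = []
    for i in range(len(tokens)):
        for j in range(i + 1, len(tokens) + 1):
            sub_parts.append(" ".join(tokens[i:j]))
    return sub_parts

# _enumerate_sub_parts(['rain', 'water', 'cycle']) ->
# ['rain', 'rain water', 'rain water cycle', 'water', 'water cycle', 'cycle']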
| 42.238411
| 122
| 0.652556
|
4628d63d96aceb8eb9e483d52b6ffde071686e77
| 18,712
|
py
|
Python
|
testing/scripts/run_performance_tests.py
|
lyapple2008/webrtc_simplify
|
c4f9bdc72d8e2648c4f4b1934d22ae94a793b553
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T16:33:09.000Z
|
2020-05-01T09:23:18.000Z
|
testing/scripts/run_performance_tests.py
|
lyapple2008/webrtc_simplify
|
c4f9bdc72d8e2648c4f4b1934d22ae94a793b553
|
[
"BSD-3-Clause"
] | null | null | null |
testing/scripts/run_performance_tests.py
|
lyapple2008/webrtc_simplify
|
c4f9bdc72d8e2648c4f4b1934d22ae94a793b553
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs telemetry benchmarks and gtest perf tests.
This script attempts to emulate the contract of gtest-style tests
invoked via recipes. The main contract is that the caller passes the
argument:
--isolated-script-test-output=[FILENAME]
json is written to that file in the format detailed here:
https://www.chromium.org/developers/the-json-test-results-format
Optional argument:
--isolated-script-test-filter=[TEST_NAMES]
is a double-colon-separated ("::") list of test names, to run just that subset
of tests. This list is forwarded to the run_telemetry_benchmark_as_googletest
script.
This script is intended to be the base command invoked by the isolate,
followed by a subsequent Python script. It could be generalized to
invoke an arbitrary executable.
It currently runs several benchmarks. The benchmarks it will execute are
based on the shard it is running on and the sharding_map_path.
If this is executed with a gtest perf test, the flag --non-telemetry
has to be passed in to the script so the script knows it is running
an executable and not the run_benchmark command.
The results of running the benchmark are put in separate directories per
benchmark. Two files will be present in each directory; perf_results.json, which
is the perf specific results (with unenforced format, could be histogram or
graph json), and test_results.json, which is a JSON test results
format file
https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md
TESTING:
To test changes to this script, please run
cd tools/perf
./run_tests ScriptsSmokeTest.testRunPerformanceTests
"""
import argparse
import json
import os
import shutil
import sys
import time
import tempfile
import traceback
import common
CHROMIUM_SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
PERF_DIR = os.path.join(CHROMIUM_SRC_DIR, 'tools', 'perf')
# Add src/tools/perf where generate_legacy_perf_dashboard_json.py lives
sys.path.append(PERF_DIR)
import generate_legacy_perf_dashboard_json
# Add src/testing/ into sys.path for importing xvfb and test_env.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import xvfb
import test_env
# Unfortunately we need to copy these variables from ../test_env.py.
# Importing it and using its get_sandbox_env breaks test runs on Linux
# (it seems to unset DISPLAY).
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
SHARD_MAPS_DIRECTORY = os.path.join(
os.path.dirname(__file__), '..', '..', 'tools', 'perf', 'core',
'shard_maps')
class OutputFilePaths(object):
"""Provide paths to where results outputs should be written.
The process_perf_results.py merge script later will pull all of these
together, so that's why they aren't in the standard locations. Also,
note that because of the OBBS (One Build Bot Step), Telemetry
has multiple tests running on a single shard, so we need to prefix
these locations with a directory named by the benchmark name.
"""
def __init__(self, isolated_out_dir, perf_test_name):
self.benchmark_path = os.path.join(isolated_out_dir, perf_test_name)
def SetUp(self):
os.makedirs(self.benchmark_path)
return self
@property
def perf_results(self):
return os.path.join(self.benchmark_path, 'perf_results.json')
@property
def test_results(self):
return os.path.join(self.benchmark_path, 'test_results.json')
@property
def logs(self):
return os.path.join(self.benchmark_path, 'benchmark_log.txt')
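# For example (illustrative arguments only): OutputFilePaths('/isolated/out',
# 'speedometer2').SetUp() creates /isolated/out/speedometer2/ and then exposes
#   .perf_results -> /isolated/out/speedometer2/perf_results.json
#   .test_results -> /isolated/out/speedometer2/test_results.json
#   .logs         -> /isolated/out/speedometer2/benchmark_log.txt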
def print_duration(step, start):
print 'Duration of %s: %d seconds' % (step, time.time() - start)
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
class GtestCommandGenerator(object):
def __init__(self, options):
self._options = options
def generate(self):
"""Generate the command to run to start the gtest perf test.
Returns:
list of strings, the executable and its arguments.
"""
return ([self._get_executable()] +
self._generate_filter_args() +
self._generate_repeat_args() +
self._generate_also_run_disabled_tests_args() +
self._generate_output_args() +
self._get_passthrough_args()
)
def _get_executable(self):
executable = self._options.executable
if IsWindows():
return r'.\%s.exe' % executable
else:
return './%s' % executable
def _get_passthrough_args(self):
return self._options.passthrough_args
def _generate_filter_args(self):
if self._options.isolated_script_test_filter:
filter_list = common.extract_filter_list(
self._options.isolated_script_test_filter)
return ['--gtest_filter=' + ':'.join(filter_list)]
return []
def _generate_repeat_args(self):
# TODO(crbug.com/920002): Support --isolated-script-test-repeat.
return []
def _generate_also_run_disabled_tests_args(self):
# TODO(crbug.com/920002): Support
# --isolated-script-test-also-run-disabled-tests.
return []
def _generate_output_args(self):
output_args = []
# These flags make sure that tests output perf metrics in the log.
if not '--verbose' in self._options.passthrough_args:
output_args.append('--verbose')
if (not '--test-launcher-print-test-stdio=always'
in self._options.passthrough_args):
output_args.append('--test-launcher-print-test-stdio=always')
return output_args
def write_legacy_test_results(return_code, output_filepath):
# TODO(crbug.com/920002): Fix to output
# https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md
valid = (return_code == 0)
failures = [] if valid else ['(entire test suite)']
output_json = {
'valid': valid,
'failures': failures,
}
with open(output_filepath, 'w') as fh:
json.dump(output_json, fh)
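# For illustration (the /tmp path is an assumption): with a failing return code
# the helper above writes the minimal legacy-format payload
#   write_legacy_test_results(1, '/tmp/test_results.json')
#   -> {"valid": false, "failures": ["(entire test suite)"]}
# and {"valid": true, "failures": []} when return_code is 0.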
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
env = os.environ.copy()
# Assume we want to set up the sandbox environment variables all the
# time; doing so is harmless on non-Linux platforms and is needed
# all the time on Linux.
env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
env['CHROME_HEADLESS'] = '1'
return_code = 0
try:
command = command_generator.generate()
if use_xvfb:
return_code = xvfb.run_executable(
command, env, stdoutfile=output_paths.logs)
else:
return_code = test_env.run_command_with_output(
command, env=env, stdoutfile=output_paths.logs)
# Get the correct json format from the stdout to write to the perf
# results file.
results_processor = generate_legacy_perf_dashboard_json.\
LegacyResultsProcessor()
graph_json_string = results_processor.GenerateJsonResults(
output_paths.logs)
with open(output_paths.perf_results, 'w') as fh:
fh.write(graph_json_string)
except Exception:
traceback.print_exc()
return_code = 1
write_legacy_test_results(return_code, output_paths.test_results)
return return_code
class TelemetryCommandGenerator(object):
def __init__(self, benchmark, options,
stories=None, is_reference=False):
self.benchmark = benchmark
self._options = options
self._stories = stories
self._is_reference = is_reference
def generate(self, output_dir):
"""Generate the command to run to start the benchmark.
Args:
output_dir: The directory to configure the command to put output files
into.
Returns:
list of strings, the executable and its arguments.
"""
return ([sys.executable, self._options.executable] +
[self.benchmark] +
self._generate_filter_args() +
self._generate_repeat_args() +
self._generate_also_run_disabled_tests_args() +
self._generate_output_args(output_dir) +
self._generate_story_range_args() +
# passthrough args must be before reference args: crbug.com/928928
self._get_passthrough_args() +
self._generate_reference_build_args()
)
def _get_passthrough_args(self):
return self._options.passthrough_args
def _generate_filter_args(self):
if self._options.isolated_script_test_filter:
filter_list = common.extract_filter_list(
self._options.isolated_script_test_filter)
# Need to convert this to a valid regex.
filter_regex = '(' + '|'.join(filter_list) + ')'
return ['--story-filter=' + filter_regex]
return []
def _generate_repeat_args(self):
if self._options.isolated_script_test_repeat:
return ['--pageset-repeat=' + str(
self._options.isolated_script_test_repeat)]
return []
def _generate_also_run_disabled_tests_args(self):
if self._options.isolated_script_test_also_run_disabled_tests:
return ['--also-run-disabled-tests']
return []
def _generate_output_args(self, output_dir):
return ['--output-format=json-test-results',
'--output-format=histograms',
'--output-dir=' + output_dir]
def _generate_story_range_args(self):
"""Returns arguments that limit the stories to be run inside the benchmark.
"""
range_arguments = []
if self._stories:
if 'begin' in self._stories.keys():
range_arguments.append('--story-shard-begin-index=%d' % (
self._stories['begin']))
if 'end' in self._stories.keys():
range_arguments.append('--story-shard-end-index=%d' % (
self._stories['end']))
return range_arguments
def _generate_reference_build_args(self):
if self._is_reference:
return ['--browser=reference',
'--max-failures=5',
'--output-trace-tag=_ref']
return []
def execute_telemetry_benchmark(
command_generator, output_paths, use_xvfb=False):
start = time.time()
env = os.environ.copy()
env['CHROME_HEADLESS'] = '1'
# Assume we want to set up the sandbox environment variables all the
# time; doing so is harmless on non-Linux platforms and is needed
# all the time on Linux.
env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
return_code = 1
temp_dir = tempfile.mkdtemp('telemetry')
try:
command = command_generator.generate(temp_dir)
if use_xvfb:
return_code = xvfb.run_executable(
command, env=env, stdoutfile=output_paths.logs)
else:
return_code = test_env.run_command_with_output(
command, env=env, stdoutfile=output_paths.logs)
expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
shutil.move(expected_perf_filename, output_paths.perf_results)
expected_results_filename = os.path.join(temp_dir, 'test-results.json')
shutil.move(expected_results_filename, output_paths.test_results)
except Exception:
print ('The following exception may have prevented the code from '
'outputting structured test results and perf results output:')
print traceback.format_exc()
finally:
# Add ignore_errors=True because otherwise rmtree may fail due to leaky
# processes of tests are still holding opened handles to files under
# |tempfile_dir|. For example, see crbug.com/865896
shutil.rmtree(temp_dir, ignore_errors=True)
print_duration('executing benchmark %s' % command_generator.benchmark, start)
if return_code:
return return_code
return 0
def parse_arguments(args):
parser = argparse.ArgumentParser()
parser.add_argument('executable', help='The name of the executable to run.')
parser.add_argument(
'--isolated-script-test-output', required=True)
# The following two flags may be passed in sometimes by Pinpoint
# or by the recipe, but they don't do anything. crbug.com/927482.
parser.add_argument(
'--isolated-script-test-chartjson-output', required=False)
parser.add_argument(
'--isolated-script-test-perf-output', required=False)
parser.add_argument(
'--isolated-script-test-filter', type=str, required=False)
# Note that the following three arguments are only supported by Telemetry
# tests right now. See crbug.com/920002.
parser.add_argument(
'--isolated-script-test-repeat', type=int, required=False)
parser.add_argument(
'--isolated-script-test-launcher-retry-limit', type=int, required=False,
choices=[0]) # Telemetry does not support retries. crbug.com/894254#c21
parser.add_argument(
'--isolated-script-test-also-run-disabled-tests',
default=False, action='store_true', required=False)
parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
parser.add_argument('--non-telemetry',
help='Type of perf test', type=bool, default=False)
parser.add_argument('--gtest-benchmark-name',
help='Name of the gtest benchmark', type=str,
required=False)
parser.add_argument('--benchmarks',
help='Comma separated list of benchmark names'
' to run in lieu of indexing into our benchmark bot maps',
required=False)
# Some executions may have a different sharding scheme and/or set of tests.
# These files must live in src/tools/perf/core/shard_maps
parser.add_argument('--test-shard-map-filename', type=str, required=False)
parser.add_argument('--run-ref-build',
help='Run test on reference browser', action='store_true')
parser.add_argument('--passthrough-arg',
help='Arguments to pass directly through to the test '
'executable.', action='append',
dest='passthrough_args',
default=[])
options, leftover_args = parser.parse_known_args(args)
options.passthrough_args.extend(leftover_args)
return options
def main():
args = sys.argv[1:] # Skip program name.
options = parse_arguments(args)
isolated_out_dir = os.path.dirname(options.isolated_script_test_output)
overall_return_code = 0
if options.non_telemetry:
command_generator = GtestCommandGenerator(options)
benchmark_name = options.gtest_benchmark_name
# Fallback to use the name of the executable if flag isn't set.
# TODO(crbug.com/870899): remove fallback logic and raise parser error if
# --non-telemetry is set but --gtest-benchmark-name is not set once pinpoint
# is converted to always pass --gtest-benchmark-name flag.
if not benchmark_name:
benchmark_name = options.executable
output_paths = OutputFilePaths(isolated_out_dir, benchmark_name).SetUp()
overall_return_code = execute_gtest_perf_test(
command_generator, output_paths, options.xvfb)
else:
# If the user has supplied a list of benchmark names, execute those instead
# of using the shard map.
if options.benchmarks:
benchmarks = options.benchmarks.split(',')
for benchmark in benchmarks:
output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp()
command_generator = TelemetryCommandGenerator(
benchmark, options)
return_code = execute_telemetry_benchmark(
command_generator, output_paths, options.xvfb)
overall_return_code = return_code or overall_return_code
if options.run_ref_build:
print ('Not running reference build. --run-ref-build argument is only '
'supported for sharded benchmarks. It is simple to support '
'this for unsharded --benchmarks if needed.')
elif options.test_shard_map_filename:
# First determine what shard we are running on to know how to
# index into the bot map to get list of telemetry benchmarks to run.
total_shards = None
shard_index = None
shard_map_path = os.path.join(SHARD_MAPS_DIRECTORY,
options.test_shard_map_filename)
env = os.environ.copy()
if 'GTEST_TOTAL_SHARDS' in env:
total_shards = env['GTEST_TOTAL_SHARDS']
if 'GTEST_SHARD_INDEX' in env:
shard_index = env['GTEST_SHARD_INDEX']
if not total_shards or not shard_index:
raise Exception(
'Sharded Telemetry perf tests must either specify --benchmarks '
'list or have shard indicator environment variables present.')
# Copy sharding map file to isolated_out_dir so that the merge script
# can collect it later.
# TODO(crouleau): Move this step over to merge script
# (process_perf_results.py).
shutil.copyfile(
shard_map_path,
os.path.join(isolated_out_dir, 'benchmarks_shard_map.json'))
with open(shard_map_path) as f:
shard_map = json.load(f)
benchmarks_and_stories = shard_map[shard_index]['benchmarks']
for benchmark, stories in benchmarks_and_stories.iteritems():
# Need to run the benchmark on both latest browser and reference build.
output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp()
command_generator = TelemetryCommandGenerator(
benchmark, options, stories=stories)
return_code = execute_telemetry_benchmark(
command_generator, output_paths, options.xvfb)
overall_return_code = return_code or overall_return_code
if options.run_ref_build:
reference_benchmark_foldername = benchmark + '.reference'
reference_output_paths = OutputFilePaths(
isolated_out_dir, reference_benchmark_foldername).SetUp()
reference_command_generator = TelemetryCommandGenerator(
benchmark, options,
stories=stories, is_reference=True)
# We intentionally ignore the return code of the reference build.
execute_telemetry_benchmark(
reference_command_generator, reference_output_paths,
options.xvfb)
else:
raise Exception('Telemetry tests must provide either a shard map or a '
'--benchmarks list so that we know which stories to run.')
return overall_return_code
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
# Conform minimally to the protocol defined by ScriptTest.
if 'compile_targets' in sys.argv:
funcs = {
'run': None,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
sys.exit(main())
| 37.878543
| 100
| 0.706873
|
f00d94594a60873f341d60bd5d2a0aa64e52bd47
| 1,096
|
py
|
Python
|
src/core/forms.py
|
lycantropos/shortener
|
f2a68c5b62c2472c0864f5accf97764a91b258c5
|
[
"MIT"
] | null | null | null |
src/core/forms.py
|
lycantropos/shortener
|
f2a68c5b62c2472c0864f5accf97764a91b258c5
|
[
"MIT"
] | null | null | null |
src/core/forms.py
|
lycantropos/shortener
|
f2a68c5b62c2472c0864f5accf97764a91b258c5
|
[
"MIT"
] | null | null | null |
from django import forms
from django.core.handlers.wsgi import WSGIRequest
from . import models
from .utils import shorten
class URL(forms.Form):
original = forms.URLField(label='original URL')
@classmethod
def from_request(cls, request: WSGIRequest):
if request.method == 'POST':
form = cls(request.POST)
if form.is_valid():
original_url = form.cleaned_data['original']
short_url = request.build_absolute_uri(shorten(original_url))
original = models.RawURL(address=original_url)
short = models.RawURL(address=short_url)
try:
original.save(force_insert=True)
short.save(force_insert=True)
except Exception as err:
form.add_error(field=None,
error=err)
else:
models.URL.objects.create(original=original,
short=short)
else:
form = cls()
return form
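# A minimal view sketch (illustration only; the view name and template path are
# assumptions, not part of this app):
#
# from django.shortcuts import render
#
# def index(request):
#     form = URL.from_request(request)
#     return render(request, 'core/index.html', {'form': form})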
| 34.25
| 77
| 0.541971
|
3bc2b76b58ed35713a214c8784d4ae6333d5685c
| 16,092
|
py
|
Python
|
src/sage/plot/histogram.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | null | null | null |
src/sage/plot/histogram.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | null | null | null |
src/sage/plot/histogram.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | null | null | null |
"""
Histograms
"""
# ****************************************************************************
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.plot.primitive import GraphicPrimitive
from sage.plot.plot import minmax_data, Graphics
from sage.misc.decorators import options
class Histogram(GraphicPrimitive):
"""
Graphics primitive that represents a histogram. This takes
quite a few options as well.
EXAMPLES::
sage: from sage.plot.histogram import Histogram
sage: g = Histogram([1,3,2,0], {}); g
Histogram defined by a data list of size 4
sage: type(g)
<class 'sage.plot.histogram.Histogram'>
sage: opts = { 'bins':20, 'label':'mydata'}
sage: g = Histogram([random() for _ in range(500)], opts); g
Histogram defined by a data list of size 500
We can accept multiple sets of the same length::
sage: g = Histogram([[1,3,2,0], [4,4,3,3]], {}); g
Histogram defined by 2 data lists
"""
def __init__(self, datalist, options):
"""
Initialize a ``Histogram`` primitive along with
its options.
EXAMPLES::
sage: from sage.plot.histogram import Histogram
sage: Histogram([10,3,5], {'width':0.7})
Histogram defined by a data list of size 3
"""
import numpy as np
self.datalist = np.asarray(datalist, dtype=float)
if 'normed' in options:
from sage.misc.superseded import deprecation
deprecation(25260, "the 'normed' option is deprecated. Use 'density' instead.")
if 'linestyle' in options:
from sage.plot.misc import get_matplotlib_linestyle
options['linestyle'] = get_matplotlib_linestyle(
options['linestyle'], return_type='long')
if options.get('range', None):
# numpy.histogram performs type checks on "range" so this must be
# actual floats
options['range'] = [float(x) for x in options['range']]
GraphicPrimitive.__init__(self, options)
def get_minmax_data(self):
"""
Get minimum and maximum horizontal and vertical ranges
for the Histogram object.
EXAMPLES::
sage: H = histogram([10,3,5], density=True); h = H[0]
sage: h.get_minmax_data() # rel tol 1e-15
{'xmax': 10.0, 'xmin': 3.0, 'ymax': 0.4761904761904765, 'ymin': 0}
sage: G = histogram([random() for _ in range(500)]); g = G[0]
sage: g.get_minmax_data() # random output
{'xmax': 0.99729312925213209, 'xmin': 0.00013024562219410285, 'ymax': 61, 'ymin': 0}
sage: Y = histogram([random()*10 for _ in range(500)], range=[2,8]); y = Y[0]
sage: ymm = y.get_minmax_data(); ymm['xmax'], ymm['xmin']
(8.0, 2.0)
sage: Z = histogram([[1,3,2,0], [4,4,3,3]]); z = Z[0]
sage: z.get_minmax_data()
{'xmax': 4.0, 'xmin': 0, 'ymax': 2, 'ymin': 0}
TESTS::
sage: h = histogram([10,3,5], normed=True)[0]
doctest:warning...:
DeprecationWarning: the 'normed' option is deprecated. Use 'density' instead.
See https://trac.sagemath.org/25260 for details.
sage: h.get_minmax_data()
doctest:warning ...
...VisibleDeprecationWarning: Passing `normed=True` on non-uniform bins has always been broken, and computes neither the probability density function nor the probability mass function. The result is only correct if the bins are uniform, when density=True will produce the same result anyway. The argument will be removed in a future version of numpy.
{'xmax': 10.0, 'xmin': 3.0, 'ymax': 0.476190476190..., 'ymin': 0}
"""
import numpy
# Extract these options (if they are not None) and pass them to
# histogram()
options = self.options()
opt = {}
for key in ('range', 'bins', 'normed', 'density', 'weights'):
try:
value = options[key]
except KeyError:
pass
else:
if value is not None:
opt[key] = value
# check to see if a list of datasets
if not hasattr(self.datalist[0], '__contains__'):
ydata, xdata = numpy.histogram(self.datalist, **opt)
return minmax_data(xdata, [0]+list(ydata), dict=True)
else:
m = {'xmax': 0, 'xmin': 0, 'ymax': 0, 'ymin': 0}
if not options.get('stacked'):
for d in self.datalist:
ydata, xdata = numpy.histogram(d, **opt)
m['xmax'] = max([m['xmax']] + list(xdata))
m['xmin'] = min([m['xmin']] + list(xdata))
m['ymax'] = max([m['ymax']] + list(ydata))
return m
else:
for d in self.datalist:
ydata, xdata = numpy.histogram(d, **opt)
m['xmax'] = max([m['xmax']] + list(xdata))
m['xmin'] = min([m['xmin']] + list(xdata))
m['ymax'] = m['ymax'] + max(list(ydata))
return m
def _allowed_options(self):
"""
Return the allowed options with descriptions for this graphics
primitive. This is used in displaying an error message when the
user gives an option that doesn't make sense.
EXAMPLES::
sage: from sage.plot.histogram import Histogram
sage: g = Histogram( [1,3,2,0], {})
sage: L = list(sorted(g._allowed_options().items()))
sage: L[0]
('align',
'How the bars align inside of each bin. Acceptable values are "left", "right" or "mid".')
sage: L[-1]
('zorder', 'The layer level to draw the histogram')
"""
return {'color': 'The color of the face of the bars or list of colors if multiple data sets are given.',
'edgecolor': 'The color of the border of each bar.',
'alpha': 'How transparent the plot is',
'hue': 'The color of the bars given as a hue.',
'fill': '(True or False, default True) Whether to fill the bars',
'hatch': 'What symbol to fill with - one of "/", "\\", "|", "-", "+", "x", "o", "O", ".", "*"',
'linewidth': 'Width of the lines defining the bars',
'linestyle': "One of 'solid' or '-', 'dashed' or '--', 'dotted' or ':', 'dashdot' or '-.'",
'zorder': 'The layer level to draw the histogram',
'bins': 'The number of sections in which to divide the range. Also can be a sequence of points within the range that create the partition.',
'align': 'How the bars align inside of each bin. Acceptable values are "left", "right" or "mid".',
'rwidth': 'The relative width of the bars as a fraction of the bin width',
'cumulative': '(True or False) If True, then a histogram is computed in which each bin gives the counts in that bin plus all bins for smaller values. Negative values give a reversed direction of accumulation.',
'range': 'A list [min, max] which define the range of the histogram. Values outside of this range are treated as outliers and omitted from counts.',
'normed': 'Deprecated. Use density instead.',
'density': '(True or False) If True, the counts are normalized to form a probability density (n/(len(x)*dbin)).',
'weights': 'A sequence of weights the same length as the data list. If supplied, then each value contributes its associated weight to the bin count.',
'stacked': '(True or False) If True, multiple data are stacked on top of each other.',
'label': 'A string label for each data list given.'}
def _repr_(self):
"""
Return text representation of this histogram graphics primitive.
EXAMPLES::
sage: from sage.plot.histogram import Histogram
sage: g = Histogram( [1,3,2,0], {})
sage: g._repr_()
'Histogram defined by a data list of size 4'
sage: g = Histogram( [[1,1,2,3], [1,3,2,0]], {})
sage: g._repr_()
'Histogram defined by 2 data lists'
"""
L = len(self.datalist)
if not hasattr(self.datalist[0], '__contains__'):
return "Histogram defined by a data list of size {}".format(L)
else:
return "Histogram defined by {} data lists".format(L)
def _render_on_subplot(self, subplot):
"""
Render this bar chart graphics primitive on a matplotlib subplot
object.
EXAMPLES:
This rendering happens implicitly when the following command
is executed::
sage: histogram([1,2,10]) # indirect doctest
Graphics object consisting of 1 graphics primitive
"""
options = self.options()
# check to see if a list of datasets
if not hasattr(self.datalist[0], '__contains__'):
subplot.hist(self.datalist, **options)
else:
subplot.hist(self.datalist.transpose(), **options)
@options(aspect_ratio='automatic', align='mid', weights=None, range=None, bins=10, edgecolor='black')
def histogram(datalist, **options):
"""
Computes and draws the histogram for list(s) of numerical data.
See examples for the many options; even more customization is
available using matplotlib directly.
INPUT:
- ``datalist`` -- A list, or a list of lists, of numerical data
- ``align`` -- (default: "mid") How the bars align inside of each bin.
Acceptable values are "left", "right" or "mid"
- ``alpha`` -- (float in [0,1], default: 1) The transparency of the plot
- ``bins`` -- The number of sections in which to divide the range. Also
can be a sequence of points within the range that create the
partition
- ``color`` -- The color of the face of the bars or list of colors if
multiple data sets are given
- ``cumulative`` -- (boolean - default: False) If True, then
a histogram is computed in which each bin gives the counts in that
bin plus all bins for smaller values. Negative values give
a reversed direction of accumulation
- ``edgecolor`` -- The color of the border of each bar
- ``fill`` -- (boolean - default: True) Whether to fill the bars
- ``hatch`` -- (default: None) symbol to fill the bars with - one of
"/", "\\", "|", "-", "+", "x", "o", "O", ".", "*", "" (or None)
- ``hue`` -- The color of the bars given as a hue. See
:mod:`~sage.plot.colors.hue` for more information on the hue
- ``label`` -- A string label for each data list given
- ``linewidth`` -- (float) width of the lines defining the bars
- ``linestyle`` -- (default: 'solid') Style of the line. One of 'solid'
or '-', 'dashed' or '--', 'dotted' or ':', 'dashdot' or '-.'
- ``density`` -- (boolean - default: False) If True, the result is the
value of the probability density function at the bin, normalized such
that the integral over the range is 1.
- ``range`` -- A list [min, max] which define the range of the
histogram. Values outside of this range are treated as outliers and
omitted from counts
- ``rwidth`` -- (float in [0,1], default: 1) The relative width of the bars
as a fraction of the bin width
- ``stacked`` -- (boolean - default: False) If True, multiple data are
stacked on top of each other
- ``weights`` -- (list) A sequence of weights the same length as the data
list. If supplied, then each value contributes its associated weight
to the bin count
- ``zorder`` -- (integer) the layer level at which to draw the histogram
.. NOTE::
The ``weights`` option works only with a single list. List of lists
representing multiple data are not supported.
EXAMPLES:
A very basic histogram for four data points::
sage: histogram([1, 2, 3, 4], bins=2)
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(histogram([1, 2, 3, 4], bins=2))
We can see how the histogram compares to various distributions.
Note the use of the ``density`` keyword to guarantee the plot
looks like the probability density function::
sage: nv = normalvariate
sage: H = histogram([nv(0, 1) for _ in range(1000)], bins=20, density=True, range=[-5, 5])
sage: P = plot(1/sqrt(2*pi)*e^(-x^2/2), (x, -5, 5), color='red', linestyle='--')
sage: H+P
Graphics object consisting of 2 graphics primitives
.. PLOT::
nv = normalvariate
H = histogram([nv(0, 1) for _ in range(1000)], bins=20, density=True, range=[-5,5 ])
P = plot(1/sqrt(2*pi)*e**(-x**2/2), (x, -5, 5), color='red', linestyle='--')
sphinx_plot(H+P)
There are many options one can use with histograms. Some of these
control the presentation of the data, even if it is boring::
sage: histogram(list(range(100)), color=(1,0,0), label='mydata', rwidth=.5, align="right")
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(histogram(list(range(100)), color=(1,0,0), label='mydata', rwidth=.5, align="right"))
This includes many usual matplotlib styling options::
sage: T = RealDistribution('lognormal', [0, 1])
sage: histogram( [T.get_random_element() for _ in range(100)], alpha=0.3, edgecolor='red', fill=False, linestyle='dashed', hatch='O', linewidth=5)
Graphics object consisting of 1 graphics primitive
.. PLOT::
T = RealDistribution('lognormal', [0, 1])
H = histogram( [T.get_random_element() for _ in range(100)], alpha=0.3, edgecolor='red', fill=False, linestyle='dashed', hatch='O', linewidth=5)
sphinx_plot(H)
::
sage: histogram( [T.get_random_element() for _ in range(100)],linestyle='-.')
Graphics object consisting of 1 graphics primitive
.. PLOT::
T = RealDistribution('lognormal', [0, 1])
sphinx_plot(histogram( [T.get_random_element() for _ in range(100)],linestyle='-.'))
We can do several data sets at once if desired::
sage: histogram([srange(0, 1, .1)*10, [nv(0, 1) for _ in range(100)]], color=['red', 'green'], bins=5)
Graphics object consisting of 1 graphics primitive
.. PLOT::
nv = normalvariate
sphinx_plot(histogram([srange(0, 1, .1)*10, [nv(0, 1) for _ in range(100)]], color=['red', 'green'], bins=5))
We have the option of stacking the data sets too::
sage: histogram([[1, 1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 4, 3, 3, 3, 2, 2, 2] ], stacked=True, color=['blue', 'red'])
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(histogram([[1, 1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 4, 3, 3, 3, 2, 2, 2] ], stacked=True, color=['blue', 'red']))
It is possible to use weights with the histogram as well::
sage: histogram(list(range(10)), bins=3, weights=[1, 2, 3, 4, 5, 5, 4, 3, 2, 1])
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(histogram(list(range(10)), bins=3, weights=[1, 2, 3, 4, 5, 5, 4, 3, 2, 1]))
"""
g = Graphics()
g._set_extra_kwds(Graphics._extract_kwds_for_show(options))
g.add_primitive(Histogram(datalist, options=options))
return g
| 45.07563
| 362
| 0.585384
|
afbe7992ea4d032ff651fe47415072702e88cce4
| 382
|
py
|
Python
|
stockviewer/stockviewer/utils/parse_config.py
|
vyacheslav-bezborodov/skt
|
58551eed497687adec5b56336037613a78cc5b2d
|
[
"MIT"
] | null | null | null |
stockviewer/stockviewer/utils/parse_config.py
|
vyacheslav-bezborodov/skt
|
58551eed497687adec5b56336037613a78cc5b2d
|
[
"MIT"
] | null | null | null |
stockviewer/stockviewer/utils/parse_config.py
|
vyacheslav-bezborodov/skt
|
58551eed497687adec5b56336037613a78cc5b2d
|
[
"MIT"
] | null | null | null |
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
try:
import cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
def parse_config(fp):
tree = etree.parse(fp)
tree.xinclude()
return tree.getroot()
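# Illustrative usage (the file name below is an assumption, not part of the
# original module); note that the xinclude() call above is only available on
# lxml trees:
#
#     root = parse_config("config.xml")
#     for child in root:
#         print(child.tag, child.attrib)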
| 20.105263
| 43
| 0.748691
|
0dba20c47aa9a9d91cfe95a57b974d462e26981a
| 2,725
|
py
|
Python
|
discord/types/invite.py
|
Awayume/discord.py
|
a5307af5bfe373d425b184633be81e8157c14abe
|
[
"MIT"
] | null | null | null |
discord/types/invite.py
|
Awayume/discord.py
|
a5307af5bfe373d425b184633be81e8157c14abe
|
[
"MIT"
] | null | null | null |
discord/types/invite.py
|
Awayume/discord.py
|
a5307af5bfe373d425b184633be81e8157c14abe
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Awayume
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, Optional, TypedDict, Union
from .snowflake import Snowflake
from .guild import InviteGuild, _GuildPreviewUnique
from .channel import PartialChannel
from .user import PartialUser
from .appinfo import PartialAppInfo
InviteTargetType = Literal[1, 2]
class _InviteOptional(TypedDict, total=False):
guild: InviteGuild
inviter: PartialUser
target_user: PartialUser
target_type: InviteTargetType
target_application: PartialAppInfo
class _InviteMetadata(TypedDict, total=False):
uses: int
max_uses: int
max_age: int
temporary: bool
created_at: str
expires_at: Optional[str]
class VanityInvite(_InviteMetadata):
code: Optional[str]
class IncompleteInvite(_InviteMetadata):
code: str
channel: PartialChannel
class Invite(IncompleteInvite, _InviteOptional):
...
class InviteWithCounts(Invite, _GuildPreviewUnique):
...
class _GatewayInviteCreateOptional(TypedDict, total=False):
guild_id: Snowflake
inviter: PartialUser
target_type: InviteTargetType
target_user: PartialUser
target_application: PartialAppInfo
class GatewayInviteCreate(_GatewayInviteCreateOptional):
channel_id: Snowflake
code: str
created_at: str
max_age: int
max_uses: int
temporary: bool
uses: bool
class _GatewayInviteDeleteOptional(TypedDict, total=False):
guild_id: Snowflake
class GatewayInviteDelete(_GatewayInviteDeleteOptional):
channel_id: Snowflake
code: str
GatewayInvite = Union[GatewayInviteCreate, GatewayInviteDelete]
| 26.980198
| 75
| 0.780183
|
fc3c417fb46da727ea8722f1eda96ea1b4c31181
| 19,567
|
py
|
Python
|
tests/test_implementations/test_sqlalchemy/api_test/test_patch_many_api.py
|
aebrahim/FastAPIQuickCRUD
|
5c4d1bea2203c30eb21557f18bf9016b55fffa60
|
[
"MIT"
] | 123
|
2021-08-17T01:54:12.000Z
|
2022-03-29T20:41:56.000Z
|
tests/test_implementations/test_sqlalchemy/api_test/test_patch_many_api.py
|
aebrahim/FastAPIQuickCRUD
|
5c4d1bea2203c30eb21557f18bf9016b55fffa60
|
[
"MIT"
] | 10
|
2021-12-28T21:34:20.000Z
|
2022-03-16T13:31:24.000Z
|
tests/test_implementations/test_sqlalchemy/api_test/test_patch_many_api.py
|
aebrahim/FastAPIQuickCRUD
|
5c4d1bea2203c30eb21557f18bf9016b55fffa60
|
[
"MIT"
] | 10
|
2021-08-17T07:37:36.000Z
|
2022-03-31T13:16:55.000Z
|
import json
from collections import OrderedDict
from starlette.testclient import TestClient
from src.fastapi_quickcrud import crud_router_builder
from src.fastapi_quickcrud import CrudMethods
from src.fastapi_quickcrud import sqlalchemy_to_pydantic
from tests.test_implementations.test_sqlalchemy.api_test import get_transaction_session, app, UntitledTable256
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.UPSERT_ONE
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# create_one_model = api_model[CrudMethods.UPSERT_ONE].__dict__
# assert create_one_model['requestModel'] or create_one_model['responseModel']
# create_one_request_model = deepcopy(create_one_model['requestModel'].__dict__['__fields__'])
# create_one_response_model = deepcopy(create_one_model['responseModel'].__dict__['__fields__'])
# Request Test
# assert create_one_request_model.pop('on_conflict', False)
# for k, v in create_one_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Test
# for k, v in create_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_create_one = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_creation_one",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.UPSERT_MANY,
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# create_many_model = api_model[CrudMethods.UPSERT_MANY].__dict__
# assert create_many_model['requestModel'] or create_many_model['responseModel']
# create_many_request_model = deepcopy(create_many_model['requestModel'].__dict__['__fields__'])
# create_many_response_model = deepcopy(create_many_model['responseModel'].__dict__['__fields__'])
#
# # Request Model Test
# assert create_many_request_model.pop('on_conflict', None)
# insert_many_model = create_many_request_model['insert'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in insert_many_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
#
# # Response Model Test
# for k, v in create_many_response_model.items():
# create_many_response_model_item = v.type_.__dict__['__fields__']
# for k, v in create_many_response_model_item.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_create_many = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_creation_many",
tags=["test"]
)
# Response Mode Test
# response_many = create_many_response_model['__root__'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in response_many.items():
# assert not v.required
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.POST_REDIRECT_GET
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# post_redirect_get_model = api_model[CrudMethods.POST_REDIRECT_GET].__dict__
# assert post_redirect_get_model['requestModel'] or post_redirect_get_model['responseModel']
# post_redirect_get_request_model = deepcopy(post_redirect_get_model['requestModel'].__dict__['__fields__'])
# post_redirect_get_response_model = deepcopy(post_redirect_get_model['responseModel'].__dict__['__fields__'])
# Request Model Test
# for k, v in post_redirect_get_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Model Test
# for k, v in post_redirect_get_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# for k, v in post_redirect_get_response_model.items():
# assert v.required
test_post_and_redirect_get = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_post_direct_get",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.FIND_ONE
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_get_data = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.PATCH_MANY
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_patch_data = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_patch_many",
tags=["test"]
)
[app.include_router(i) for i in [test_post_and_redirect_get, test_patch_data, test_create_one, test_create_many, test_get_data]]
client = TestClient(app)
primary_key_name = UntitledTable256.primary_key_of_table
unique_fields = UntitledTable256.unique_fields
def test_create_many_and_patch_many():
headers = {
'accept': 'application/json',
'Content-Type': 'application/json',
}
data = { "insert": [ { "bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285Z", "timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string", "array_value": [ 0 ],
"array_str__value": [ "string" ], "time_value": "18:18:18" , "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string", "time_value": "18:18:18",
"timestamp_value": "2021-07-24T02:54:53.285Z",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285Z",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "time_value": "18:18:18", "timetz_value": "18:18:18+00:00"},
] }
response = client.post('/test_creation_many', headers=headers, data=json.dumps(data))
assert response.status_code == 201
insert_response_data = response.json()
primary_key_list = [i[primary_key_name] for i in insert_response_data]
min_key = min(primary_key_list)
max_key = max(primary_key_list)
params = {"primary_key____from": min_key,
"primary_key____to": max_key,
"bool_value____list":True,
"char_value____str": 'string%',
"char_value____str_____matching_pattern": 'case_sensitive',
"date_value____from": "2021-07-22",
"date_value____to": "2021-07-25",
"float4_value____from": -1,
"float4_value____to": 2,
"float4_value____list": 0,
"float8_value____from": -1,
"float8_value____to": 2,
"float8_value____list": 0,
"int2_value____from": -1,
"int2_value____to": 9,
"int2_value____list": 0,
"int4_value____from": -1,
"int4_value____to": 9,
"int4_value____list": 0,
"int8_value____from": -1,
"int8_value____to": 9,
"int8_value____list": 0,
"interval_value____from": -1,
"interval_value____to": 9,
"interval_value____list": 0,
"numeric_value____from": -1,
"numeric_value____to": 9,
"numeric_value____list": 0,
"text_value____list": "string",
"time_value____from": '18:18:18',
"time_value____to": '18:18:18',
"time_value____list": '18:18:18',
"timestamp_value_value____from": "2021-07-24T02:54:53.285",
"timestamp_value_value____to": "2021-07-24T02:54:53.285",
"timestamp_value_value____list": "2021-07-24T02:54:53.285",
"timestamptz_value_value____from": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____to": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____list": "2021-07-24T02:54:53.285Z",
"uuid_value_value____list": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"time_value____from": '18:18:18+00:00',
"time_value____to": '18:18:18+00:00',
"time_value____list": '18:18:18+00:00',
"varchar_value____str": 'string',
"varchar_value____str_____matching_pattern": 'case_sensitive',
"varchar_value____list": 'string',
}
from urllib.parse import urlencode
query_string = urlencode(OrderedDict(**params))
update_data = { "bool_value": False, "char_value": "string_u ", "date_value": "2022-07-24",
"float8_value": 10.5, "int2_value": 10, "int4_value": 10, "interval_value": 3600,
"json_value": {'test':'hello'}, "jsonb_value": {'test':'hello'}, "numeric_value": 10, "text_value": "string_update",
"timestamp_value": "2022-07-24T02:54:53.285000", "timestamptz_value": "2022-07-24T02:54:53.285000+00:00",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afb6", "varchar_value": "string", "array_value": [ 1,2,3,4,5 ],
"array_str__value": [ "test" ], "time_value": "18:19:18" , "timetz_value": "18:19:18+00:00"}
response = client.patch(f'/test_patch_many?{query_string}', data= json.dumps(update_data))
response_data = response.json()
assert len(response_data) == 3
for k in response_data:
for i in update_data:
print(i)
print(k[i])
assert k[i] == update_data[i]
| 50.300771
| 143
| 0.608269
|
dcebb4edbba31e71a2c6fffb7694e48ec9a0ea01
| 3,257
|
py
|
Python
|
userbot/plugins/blacklist.py
|
techyminati/DeOXy
|
014efbf6ba4ba31525f996e935279e8918c8ba96
|
[
"Apache-2.0"
] | 2
|
2020-08-02T17:20:12.000Z
|
2020-11-02T23:28:05.000Z
|
userbot/plugins/blacklist.py
|
techyminati/DeOXy
|
014efbf6ba4ba31525f996e935279e8918c8ba96
|
[
"Apache-2.0"
] | null | null | null |
userbot/plugins/blacklist.py
|
techyminati/DeOXy
|
014efbf6ba4ba31525f996e935279e8918c8ba96
|
[
"Apache-2.0"
] | 6
|
2020-08-17T16:11:18.000Z
|
2020-11-03T16:06:46.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Filters
Available Commands:
.addblacklist
.listblacklist
.rmblacklist"""
import asyncio
import io
import re
import userbot.plugins.sql_helper.blacklist_sql as sql
from global_variables_sql import SYNTAX, MODULE_LIST
from telethon import events, utils
from telethon.tl import types, functions
from userbot.utils import admin_cmd
MODULE_LIST.append("blacklist")
@borg.on(events.NewMessage(incoming=True))
async def on_new_message(event):
# TODO: exempt admins from locks
name = event.raw_text
snips = sql.get_chat_blacklist(event.chat_id)
for snip in snips:
pattern = r"( |^|[^\w])" + re.escape(snip) + r"( |$|[^\w])"
if re.search(pattern, name, flags=re.IGNORECASE):
try:
await event.delete()
except Exception as e:
await event.reply("I do not have DELETE permission in this chat")
sql.rm_from_blacklist(event.chat_id, snip.lower())
break
@borg.on(admin_cmd("addblacklist ((.|\n)*)"))
async def on_add_black_list(event):
text = event.pattern_match.group(1)
to_blacklist = list(set(trigger.strip() for trigger in text.split("\n") if trigger.strip()))
for trigger in to_blacklist:
sql.add_to_blacklist(event.chat_id, trigger.lower())
await event.edit("Added {} triggers to the blacklist in the current chat".format(len(to_blacklist)))
@borg.on(admin_cmd("listblacklist"))
async def on_view_blacklist(event):
all_blacklisted = sql.get_chat_blacklist(event.chat_id)
OUT_STR = "Blacklists in the Current Chat:\n"
if len(all_blacklisted) > 0:
for trigger in all_blacklisted:
OUT_STR += f"👉 {trigger} \n"
else:
OUT_STR = "No BlackLists. Start Saving using `.addblacklist`"
if len(OUT_STR) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(OUT_STR)) as out_file:
out_file.name = "blacklist.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="BlackLists in the Current Chat",
reply_to=event
)
await event.delete()
else:
await event.edit(OUT_STR)
@borg.on(admin_cmd("rmblacklist ((.|\n)*)"))
async def on_delete_blacklist(event):
text = event.pattern_match.group(1)
to_unblacklist = list(set(trigger.strip() for trigger in text.split("\n") if trigger.strip()))
successful = 0
for trigger in to_unblacklist:
if sql.rm_from_blacklist(event.chat_id, trigger.lower()):
successful += 1
await event.edit(f"Removed {successful} / {len(to_unblacklist)} from the blacklist")
SYNTAX.update({
"blacklist": f"\
**Requested Module --> Blacklister**\
\n\nDetailed usage of function(s):\
\nUsage: Adds, Removes and Shows Blacklists.\
\n\n**List of Available commands:**\
\n```.addblacklist``` __(Add blacklist)__\n\
\n```.rmblacklist``` __(Remove blacklist)__\n\
\n```.listblacklist``` __(Shows The Black List)__\n\
"
})
| 36.188889
| 104
| 0.660731
|
0b9618f5fa9310b7e1617f4ba4321d9ddf7cd523
| 752
|
py
|
Python
|
arch/migrations/0008_auto_20210122_1155.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 15
|
2021-08-28T18:18:37.000Z
|
2022-03-13T07:48:15.000Z
|
arch/migrations/0008_auto_20210122_1155.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 65
|
2021-08-20T02:37:27.000Z
|
2022-02-07T17:19:23.000Z
|
arch/migrations/0008_auto_20210122_1155.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 31
|
2020-01-09T02:35:29.000Z
|
2022-03-13T07:48:18.000Z
|
# Generated by Django 3.0.7 on 2021-01-22 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('arch', '0007_auto_20210122_0027'),
]
operations = [
migrations.AddField(
model_name='problem',
name='aops_url',
field=models.URLField(blank=True, help_text='Hyperlink to problem on Art of Problem Solving.', max_length=128),
),
migrations.AlterField(
model_name='problem',
name='source',
            field=models.CharField(blank=True, help_text='The source of the problem, such as `TSTST 2020/3`. If in doubt on formatting, follow what is written on the handout.', max_length=64),
),
]
| 31.333333
| 191
| 0.62766
|
a3382b98b7b841d0afd3a24db1c02adbddef9226
| 4,358
|
py
|
Python
|
examples/conv_filter_visualization.py
|
IndigenousEngineering/keras_docker_with_NLTK
|
075958831a3f74763ad1e094b3642f5174c7f817
|
[
"MIT"
] | 20
|
2018-07-16T12:43:24.000Z
|
2020-12-15T08:37:35.000Z
|
examples/conv_filter_visualization.py
|
Qily/keras
|
1d81a20292ca6926e595d06a6cd725dbb104a146
|
[
"MIT"
] | 13
|
2018-10-15T10:09:28.000Z
|
2019-01-07T04:48:27.000Z
|
examples/conv_filter_visualization.py
|
Qily/keras
|
1d81a20292ca6926e595d06a6cd725dbb104a146
|
[
"MIT"
] | 22
|
2018-08-30T14:12:06.000Z
|
2021-07-03T19:43:15.000Z
|
'''Visualization of the filters of VGG16, via gradient ascent in input space.
This script can run on CPU in a few minutes.
Results example: http://i.imgur.com/4nj4KjN.jpg
'''
from __future__ import print_function
import numpy as np
import time
from keras.preprocessing.image import save_img
from keras.applications import vgg16
from keras import backend as K
# dimensions of the generated pictures for each filter.
img_width = 128
img_height = 128
# the name of the layer we want to visualize
# (see model definition at keras/applications/vgg16.py)
layer_name = 'block5_conv1'
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + K.epsilon())
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
if K.image_data_format() == 'channels_first':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
# build the VGG16 network with ImageNet weights
model = vgg16.VGG16(weights='imagenet', include_top=False)
print('Model loaded.')
model.summary()
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
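# Note (added for clarity): normalizing the gradient to unit L2 norm keeps the
# ascent step magnitude roughly constant regardless of the raw gradient scale,
# so the fixed `step` used below behaves consistently across filters.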
kept_filters = []
for filter_index in range(200):
# we only scan through the first 200 filters,
# but there are actually 512 of them
print('Processing filter %d' % filter_index)
start_time = time.time()
# we build a loss function that maximizes the activation
# of the nth filter of the layer considered
layer_output = layer_dict[layer_name].output
if K.image_data_format() == 'channels_first':
loss = K.mean(layer_output[:, filter_index, :, :])
else:
loss = K.mean(layer_output[:, :, :, filter_index])
# we compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_img)[0]
# normalization trick: we normalize the gradient
grads = normalize(grads)
# this function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])
# step size for gradient ascent
step = 1.
# we start from a gray image with some random noise
if K.image_data_format() == 'channels_first':
input_img_data = np.random.random((1, 3, img_width, img_height))
else:
input_img_data = np.random.random((1, img_width, img_height, 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# we run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
print('Current loss value:', loss_value)
if loss_value <= 0.:
# some filters get stuck to 0, we can skip them
break
# decode the resulting input image
if loss_value > 0:
img = deprocess_image(input_img_data[0])
kept_filters.append((img, loss_value))
end_time = time.time()
print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
# we will stitch the best 64 filters on an 8 x 8 grid.
n = 8
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
for i in range(n):
for j in range(n):
img, loss = kept_filters[i * n + j]
width_margin = (img_width + margin) * i
height_margin = (img_height + margin) * j
stitched_filters[
width_margin: width_margin + img_width,
height_margin: height_margin + img_height, :] = img
# save the result to disk
save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
| 31.128571
| 79
| 0.673933
|
e40744db4dae89c5b18eece8fb12c59d83d47105
| 360
|
py
|
Python
|
24_Viral_Advertising.py
|
waditya/HackerRank_Algorithms_Implementation_Challenges
|
343fde9a411d49ab4ef4d96269d2eceef8c985ca
|
[
"Apache-2.0"
] | null | null | null |
24_Viral_Advertising.py
|
waditya/HackerRank_Algorithms_Implementation_Challenges
|
343fde9a411d49ab4ef4d96269d2eceef8c985ca
|
[
"Apache-2.0"
] | null | null | null |
24_Viral_Advertising.py
|
waditya/HackerRank_Algorithms_Implementation_Challenges
|
343fde9a411d49ab4ef4d96269d2eceef8c985ca
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python3
import sys
import math
def viralAdvertising(n):
    # The list tracks the number of likes on each day
    # (day 1: 5 shares -> floor(5/2) = 2 likes).
    likes_per_day = [2]
    for index in range(n-1):
        # Each person who liked shares with 3 others; half of those like it.
        likes_per_day.append(int(3 * likes_per_day[index] / 2))
    return sum(likes_per_day)
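# Worked example (illustrative): for n = 3 the likes per day are 2, 3 and 4
# (5 shares -> 2 likes, 6 shares -> 3 likes, 9 shares -> 4 likes), so the
# function returns 2 + 3 + 4 = 9.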
if __name__ == "__main__":
n = int(input().strip())
result = viralAdvertising(n)
print(result)
| 21.176471
| 77
| 0.672222
|
bc7c39411c5cc2e042c4a11db4d5f02c7c9fbbd8
| 572
|
py
|
Python
|
setup.py
|
ucarlos/Romanjize
|
b5ab006c2f4ffbee02897cf919d9860acdbbbaf5
|
[
"MIT"
] | null | null | null |
setup.py
|
ucarlos/Romanjize
|
b5ab006c2f4ffbee02897cf919d9860acdbbbaf5
|
[
"MIT"
] | null | null | null |
setup.py
|
ucarlos/Romanjize
|
b5ab006c2f4ffbee02897cf919d9860acdbbbaf5
|
[
"MIT"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {"description": "A small program to convert Japanese tags into English.",
"author": "Ulysses Carlos",
"url": "N/A",
"download_url": "https://github.com/ucarlos/Romanjize",
"author_email": "ucarlos1@student.gsu.edu",
"version": "0.15",
"install_requires": ['nose', 'mutagen', 'google_trans_new'],
"packages": ['Romanjize'],
"scripts": [],
"name": "romanjize"}
setup(**config)
| 30.105263
| 82
| 0.585664
|
cb1284c5d4c3c388f749c1340f1c0859acf2d741
| 7,187
|
py
|
Python
|
pywps/validator/complexvalidator.py
|
mishravikas/pywps-4
|
6d29f0ef73e6f0866df90f777ece27f50e4eea07
|
[
"MIT"
] | 1
|
2019-09-03T11:08:55.000Z
|
2019-09-03T11:08:55.000Z
|
pywps/validator/complexvalidator.py
|
mishravikas/pywps-4
|
6d29f0ef73e6f0866df90f777ece27f50e4eea07
|
[
"MIT"
] | null | null | null |
pywps/validator/complexvalidator.py
|
mishravikas/pywps-4
|
6d29f0ef73e6f0866df90f777ece27f50e4eea07
|
[
"MIT"
] | 1
|
2019-09-03T11:08:58.000Z
|
2019-09-03T11:08:58.000Z
|
"""Validator classes are used for ComplexInputs, to validate the content
"""
from pywps.validator import ValidatorAbstract
from pywps.validator import MODE
from pywps.formats import FORMATS
import os
import mimetypes
class BasicValidator(ValidatorAbstract):
"""Data validator implements ValidatorAbstract class
>>> from pywps.validator import MODE
>>> open('file.json','w').write('{"foo": "bar"}')
>>> class FakeInput:
... source_type = 'file'
... source = 'file.json'
>>> fake_input = FakeInput()
>>> validator = BasicValidator()
    >>> validator.validate(fake_input, MODE.NONE)
True
"""
def validate(self, data_input, level=MODE.VERYSTRICT):
"""Perform input validation
"""
return True
def validategml(data_input, mode):
"""GML validation example
>>> import StringIO
>>> class FakeInput(object):
... gml = open('point.gml','w')
... gml.write('''<?xml version="1.0" ?>
... <gml:featureMember xmlns:gml="http://www.opengis.net/gml" xsi:schemaLocation="http://www.opengis.net/gml http://schemas.opengis.net/gml/2.1.2/feature.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><feature:feature xmlns:feature="http://example.com/feature"><feature:geometry><gml:Point><gml:coordinates decimal="." cs=", " ts=" ">-1, 1</gml:coordinates></gml:Point></feature:geometry></feature:feature></gml:featureMember>''')
... gml.close()
... file = 'point.gml'
>>> class fake_data_format(object):
... mimetype = 'application/gml+xml'
>>> fake_input = FakeInput()
>>> fake_input.data_format = fake_data_format()
>>> validategml(fake_input, MODE.SIMPLE)
True
"""
passed = False
if mode >= MODE.NONE:
passed = True
import sys
if mode >= MODE.SIMPLE:
_get_mimetypes()
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = (mtype == data_input.data_format.mimetype == FORMATS['GML'][0])
if mode >= MODE.STRICT:
from osgeo import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GML")
else:
passed = False
if mode >= MODE.VERYSTRICT:
from lxml import etree
from pywps._compat import PY2
if PY2:
from urllib2 import urlopen
else:
from urllib.request import urlopen
schema_url = data_input.data_format.schema
gmlschema_doc = etree.parse(urlopen(schema_url))
gmlschema = etree.XMLSchema(gmlschema_doc)
return gmlschema.validate(etree.parse(data_input.stream))
return passed
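# Note (added for clarity): the MODE levels escalate -- SIMPLE only checks the
# guessed mimetype, STRICT also opens the file with OGR and checks that the
# driver is "GML", and VERYSTRICT validates the stream against the GML schema
# referenced by the input's data format.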
def validategeojson(data_input, mode):
"""GeoJSON validation example
>>> import StringIO
>>> class FakeInput(object):
... json = open('point.geojson','w')
... json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''')
... json.close()
... file = 'point.geojson'
>>> class fake_data_format(object):
... mimetype = 'application/geojson'
>>> fake_input = FakeInput()
>>> fake_input.data_format = fake_data_format()
>>> validategeojson(fake_input, MODE.SIMPLE)
True
"""
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
_get_mimetypes()
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = (mtype == data_input.data_format.mimetype == FORMATS['GEOJSON'][0])
if mode >= MODE.STRICT:
from osgeo import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GeoJSON")
else:
passed = False
if mode >= MODE.VERYSTRICT:
import jsonschema
import json
# this code comes from
# https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py
schema_home = os.path.join(_get_schemas_home(), "geojson")
base_schema = os.path.join(schema_home, "geojson.json")
geojson_base = json.load(open(base_schema))
cached_json = {
"http://json-schema.org/geojson/crs.json":
json.load(open(os.path.join(schema_home, "crs.json"))),
"http://json-schema.org/geojson/bbox.json":
json.load(open(os.path.join(schema_home, "bbox.json"))),
"http://json-schema.org/geojson/geometry.json":
json.load(open(os.path.join(schema_home, "geometry.json")))
}
resolver = jsonschema.RefResolver(
"http://json-schema.org/geojson/geojson.json",
geojson_base, store=cached_json)
validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver)
try:
validator.validate(json.loads(data_input.stream.read()))
passed = True
except jsonschema.ValidationError:
passed = False
return passed
def validateshapefile(data_input, mode):
"""ESRI Shapefile validation example
"""
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
_get_mimetypes()
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = (mtype == data_input.data_format.mimetype == FORMATS['SHP'][0])
if mode >= MODE.STRICT:
from osgeo import ogr
import zipfile
z = zipfile.ZipFile(data_input.stream)
shape_name = None
for name in z.namelist():
z.extract(name, data_input.tempdir)
if os.path.splitext(name)[1].lower() == '.shp':
shape_name = name
data_input.stream.close()
if shape_name:
data_source = ogr.Open(os.path.join(data_input.tempdir, shape_name))
if data_source:
passed = (data_source.GetDriver().GetName() == "ESRI Shapefile")
else:
passed = False
return passed
def _get_schemas_home():
"""Get path to schemas directory
"""
return os.path.join(
os.path.abspath(
os.path.dirname(__file__)
),
os.path.pardir,
"schemas")
def _get_mimetypes():
from pywps.formats import FORMATS
mimetypes.init()
for pywps_format in FORMATS:
(mtype, ext) = FORMATS[pywps_format]
# NOTE: strict is set to True: mimetype will be added to system
# mimetypes, zip -> application/zipped-shapefile
mimetypes.add_type(mtype, ext, True)
if __name__ == "__main__":
import doctest
import tempfile, os
from contextlib import contextmanager
from path import path
@contextmanager
def temp_dir():
tmp = path(tempfile.mkdtemp())
try:
yield tmp
finally:
tmp.rmtree()
with temp_dir() as tmp:
os.chdir(tmp)
doctest.testmod()
| 30.45339
| 450
| 0.612077
|
56c32194513c936aaeb240b9cd8032fc5b5b6cfc
| 262
|
py
|
Python
|
clock.py
|
kousuke42/TweetBot
|
4e8e3ad1af034d577bdb67744b2754694cfc14a6
|
[
"MIT"
] | null | null | null |
clock.py
|
kousuke42/TweetBot
|
4e8e3ad1af034d577bdb67744b2754694cfc14a6
|
[
"MIT"
] | null | null | null |
clock.py
|
kousuke42/TweetBot
|
4e8e3ad1af034d577bdb67744b2754694cfc14a6
|
[
"MIT"
] | null | null | null |
from apscheduler.schedulers.blocking import BlockingScheduler
import words
twische = BlockingScheduler()
@twische.scheduled_job('interval',minutes=30)
def timed_job():
words.puttweet()
print("tweetしました。")
if __name__ == "__main__":
twische.start()
| 21.833333
| 61
| 0.751908
|
f0776e758c918fee24f6fee9aa9536e94db3903a
| 7,669
|
py
|
Python
|
augly/audio/utils.py
|
Adib234/AugLy
|
35a6a5de07e64f465b8979e3257218551929e57a
|
[
"MIT"
] | null | null | null |
augly/audio/utils.py
|
Adib234/AugLy
|
35a6a5de07e64f465b8979e3257218551929e57a
|
[
"MIT"
] | null | null | null |
augly/audio/utils.py
|
Adib234/AugLy
|
35a6a5de07e64f465b8979e3257218551929e57a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numbers
from typing import Any, Dict, List, Optional, Tuple, Union
import augly.audio.intensity as audintensity
import augly.utils as utils
import numpy as np
import torch
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
import librosa
import soundfile as sf
import torchaudio
# Use Any because np.random.Generator is not a valid type for pyre
RNG = Any
RNGSeed = Union[int, RNG]
Segment = utils.Segment
def validate_and_load_audio(
audio: Union[str, np.ndarray], sample_rate: int = utils.DEFAULT_SAMPLE_RATE
) -> Tuple[np.ndarray, int]:
"""
If audio is a str, loads the audio as an np.ndarray and returns that & the
audio's sample rate (returned by librosa.load()). If audio is an np.ndarray,
just returns the passed in audio & sample_rate.
"""
if isinstance(audio, str):
local_path = utils.pathmgr.get_local_path(audio)
utils.validate_audio_path(local_path)
return librosa.load(local_path, sr=None, mono=False)
assert isinstance(
audio, np.ndarray
), "Expected type np.ndarray for variable 'audio'"
assert (
isinstance(sample_rate, int) and sample_rate > 0
), "Expected 'sample_rate' to be a positive integer"
return audio, sample_rate
def ret_and_save_audio(
audio: np.ndarray,
output_path: Optional[str],
sample_rate: int = utils.DEFAULT_SAMPLE_RATE,
) -> Tuple[np.ndarray, int]:
if output_path is not None:
utils.validate_output_path(output_path)
try:
# Note: librosa reads in audio data as (num_channels, num_samples),
# but soundfile expects it to be (num_samples, num_channels) when
# writing it out, so we have to swap axes here.
saved_audio = np.swapaxes(audio, 0, 1) if audio.ndim > 1 else audio
sf.write(output_path, saved_audio, sample_rate)
except TypeError:
saved_audio = audio if audio.ndim > 1 else audio.reshape(1, audio.shape[-1])
torchaudio.backend.sox_io_backend.save(
output_path, torch.Tensor(saved_audio), sample_rate, channels_first=True
)
return audio, sample_rate
def check_random_state(seed: Optional[RNGSeed]) -> RNG:
"""
Turn seed into a np.random.RandomState instance
@param seed: instance of RandomState:
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, (np.random.RandomState, np.random.Generator)):
return seed
raise ValueError(
f"{seed} cannot be used to seed a numpy.random.RandomState instance"
)
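# Illustrative calls (added for clarity; the seed values are arbitrary):
#
#     check_random_state(None)    # -> the global np.random singleton
#     check_random_state(1234)    # -> np.random.RandomState seeded with 1234
#     rng = np.random.RandomState(0); check_random_state(rng) is rng  # -> True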
def get_metadata(
metadata: Optional[List[Dict[str, Any]]],
function_name: str,
audio: np.ndarray,
sample_rate: int,
dst_audio: np.ndarray,
dst_sample_rate: int,
**kwargs,
) -> None:
if metadata is None:
return
assert isinstance(
metadata, list
), "Expected 'metadata' to be set to None or of type list"
src_duration = audio.shape[-1] / sample_rate
dst_duration = dst_audio.shape[-1] / dst_sample_rate
src_segments, dst_segments = compute_segments(
function_name, src_duration, dst_duration, metadata, **kwargs
)
metadata.append(
{
"name": function_name,
"src_duration": src_duration,
"dst_duration": dst_duration,
"src_num_channels": 1 if audio.ndim == 1 else audio.shape[0],
"dst_num_channels": 1 if dst_audio.ndim == 1 else dst_audio.shape[0],
"src_sample_rate": sample_rate,
"dst_sample_rate": dst_sample_rate,
"src_segments": [src_segment._asdict() for src_segment in src_segments],
"dst_segments": [dst_segment._asdict() for dst_segment in dst_segments],
**kwargs,
}
)
intensity_kwargs = {"metadata": metadata[-1], **kwargs}
metadata[-1]["intensity"] = getattr(
audintensity, f"{function_name}_intensity", lambda **_: 0.0
)(**intensity_kwargs)
def compute_changed_segments(
name: str,
src_segments: List[Segment],
dst_segments: List[Segment],
src_duration: float,
dst_duration: float,
speed_factor: float,
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
"""
This function performs the logic of computing the new matching segments based
    on the old ones, for the set of transforms that temporally change the audio.
Returns the lists of new src segments & dst segments, respectively.
"""
new_src_segments, new_dst_segments = [], []
for src_segment, dst_segment in zip(src_segments, dst_segments):
if name == "insert_in_background":
offset = kwargs["offset_factor"] * kwargs["background_duration"]
            # The matching segments are just offset in the dst audio by the amount
            # of background audio inserted before the src audio.
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(dst_segment.start + offset, dst_segment.end + offset)
)
elif name == "clip":
crop_start = kwargs["offset_factor"] * src_duration
crop_end = crop_start + kwargs["duration_factor"] * src_duration
utils.compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
)
elif name in ["speed", "tempo", "time_stretch"]:
# speed_factor > 1 if speedup, < 1 if slow down
speed_factor = src_duration / dst_duration
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(
dst_segment.start / speed_factor, dst_segment.end / speed_factor
)
)
return new_src_segments, new_dst_segments
def compute_segments(
name: str,
src_duration: float,
dst_duration: float,
metadata: List[Dict[str, Any]],
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
speed_factor = 1.0
if not metadata:
src_segments = [Segment(0.0, src_duration)]
dst_segments = [Segment(0.0, src_duration)]
else:
src_segments = [
Segment(segment_dict["start"], segment_dict["end"])
for segment_dict in metadata[-1]["src_segments"]
]
dst_segments = [
Segment(segment_dict["start"], segment_dict["end"])
for segment_dict in metadata[-1]["dst_segments"]
]
for meta in metadata:
if meta["name"] in ["speed", "tempo"]:
speed_factor *= meta["factor"]
if meta["name"] == "time_stretch":
speed_factor *= meta["rate"]
if name in [
"insert_in_background",
"clip",
"speed",
"tempo",
"time_stretch",
]:
return compute_changed_segments(
name,
src_segments,
dst_segments,
src_duration,
dst_duration,
speed_factor,
**kwargs,
)
else:
return src_segments, dst_segments
| 33.489083
| 88
| 0.630069
|
0964f0e44ce15e3ce659f1c7df848613e3c0875b
| 3,050
|
py
|
Python
|
sdk/python/pulumi_oci/core/get_services.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/core/get_services.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/core/get_services.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetServicesResult',
'AwaitableGetServicesResult',
'get_services',
]
@pulumi.output_type
class GetServicesResult:
"""
A collection of values returned by getServices.
"""
def __init__(__self__, filters=None, id=None, services=None):
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if services and not isinstance(services, list):
raise TypeError("Expected argument 'services' to be a list")
pulumi.set(__self__, "services", services)
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetServicesFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def services(self) -> Sequence['outputs.GetServicesServiceResult']:
"""
The list of services.
"""
return pulumi.get(self, "services")
class AwaitableGetServicesResult(GetServicesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServicesResult(
filters=self.filters,
id=self.id,
services=self.services)
def get_services(filters: Optional[Sequence[pulumi.InputType['GetServicesFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServicesResult:
"""
This data source provides the list of Services in Oracle Cloud Infrastructure Core service.
Lists the available [Service](https://docs.cloud.oracle.com/iaas/api/#/en/iaas/latest/Service/) objects that you can enable for a
service gateway in this region.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_services = oci.core.get_services()
```
"""
__args__ = dict()
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:core/getServices:getServices', __args__, opts=opts, typ=GetServicesResult).value
return AwaitableGetServicesResult(
filters=__ret__.filters,
id=__ret__.id,
services=__ret__.services)
| 31.443299
| 133
| 0.662295
|
08901e2671092bf38e730b364bab9f8105421ffd
| 1,037
|
py
|
Python
|
py/lcEasy_0001_Two_Sums.py
|
stansuo/leetcode-py
|
7a8afccfe89bcb957163c1b8b77be0c86f9e1c2a
|
[
"MIT"
] | null | null | null |
py/lcEasy_0001_Two_Sums.py
|
stansuo/leetcode-py
|
7a8afccfe89bcb957163c1b8b77be0c86f9e1c2a
|
[
"MIT"
] | null | null | null |
py/lcEasy_0001_Two_Sums.py
|
stansuo/leetcode-py
|
7a8afccfe89bcb957163c1b8b77be0c86f9e1c2a
|
[
"MIT"
] | null | null | null |
# Given an array of integers, return indices of the two numbers such that they add up to a specific target.
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
# Example:
# Given nums = [2, 7, 11, 15], target = 9,
# Because nums[0] + nums[1] = 2 + 7 = 9,
# return [0, 1].
# class Solution:
# def twoSum(self, nums: List[int], target: int) -> List[int]:
# for i in range(len(nums)):
# for j in range(i+1, len(nums)):
# # print(i, j)
# if nums[i] + nums[j] == target:
# return [i, j]
# solution = twoSum([2, 7, 11, 15], 9)
# print(solution)
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
h = {}
for i, num in enumerate(nums):
n = target - num
if n not in h:
h[num] = i
else:
return [h[n], i]
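# Illustrative usage (not part of the original file):
#
#     print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1]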
| 28.027027
| 109
| 0.502411
|
312c964d9387db62c0dc3abf5beaf282d0294575
| 989
|
py
|
Python
|
Algorithms/14_disjoint_set/disjoint-set.py
|
shouvikch97/Hacktoberfest-2k20
|
293bdd75ee51e1e5cdbdd03f6e7beca063280e34
|
[
"MIT"
] | 18
|
2020-10-01T05:08:18.000Z
|
2021-10-01T13:51:18.000Z
|
Algorithms/14_disjoint_set/disjoint-set.py
|
shouvikch97/Hacktoberfest-2k20
|
293bdd75ee51e1e5cdbdd03f6e7beca063280e34
|
[
"MIT"
] | 117
|
2020-09-30T16:31:42.000Z
|
2021-09-30T08:08:00.000Z
|
Algorithms/14_disjoint_set/disjoint-set.py
|
shouvikch97/Hacktoberfest-2k20
|
293bdd75ee51e1e5cdbdd03f6e7beca063280e34
|
[
"MIT"
] | 79
|
2020-09-30T21:18:39.000Z
|
2021-09-30T08:19:19.000Z
|
# Python3 program to implement Disjoint Set Data Structure for union and find.
class DisjSet:
def __init__(self, n):
self.rank = [1] * n
self.parent = [i for i in range(n)]
# Finds set of given item x
def find(self, x):
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def Union(self, x, y):
xset = self.find(x)
yset = self.find(y)
if xset == yset:
return
if self.rank[xset] < self.rank[yset]:
self.parent[xset] = yset
elif self.rank[xset] > self.rank[yset]:
self.parent[yset] = xset
else:
self.parent[yset] = xset
self.rank[xset] = self.rank[xset] + 1
# Driver code
obj = DisjSet(5)
obj.Union(0, 2)
obj.Union(4, 2)
obj.Union(3, 1)
if obj.find(4) == obj.find(0):
print('Yes')
else:
print('No')
if obj.find(1) == obj.find(0):
print('Yes')
else:
print('No')
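# Note (added for clarity): find() applies path compression and Union() uses
# union by rank, so a sequence of m operations on n elements runs in nearly
# constant amortized time per operation, O(alpha(n)) with alpha the inverse
# Ackermann function.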
| 21.977778
| 78
| 0.541962
|
dc379367466c4018437418cfa5cb214cfab9d543
| 37,728
|
py
|
Python
|
pyfarm/jobtypes/core/internals.py
|
guidow/pyfarm-agent
|
bb5d464f9f6549a3db3529a93e3d9f388b365586
|
[
"Apache-2.0"
] | null | null | null |
pyfarm/jobtypes/core/internals.py
|
guidow/pyfarm-agent
|
bb5d464f9f6549a3db3529a93e3d9f388b365586
|
[
"Apache-2.0"
] | null | null | null |
pyfarm/jobtypes/core/internals.py
|
guidow/pyfarm-agent
|
bb5d464f9f6549a3db3529a93e3d9f388b365586
|
[
"Apache-2.0"
] | null | null | null |
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Job Type Internals
==================
Contains classes which contain internal methods for
the :class:`pyfarm.jobtypes.core.jobtype.JobType` class.
"""
import imp
import os
import sys
import tempfile
import time
from collections import namedtuple
from errno import EEXIST
from datetime import datetime
from os.path import dirname, join, isfile, basename
from uuid import UUID
from functools import partial
try:
import pwd
except ImportError: # pragma: no cover
pwd = NotImplemented
try:
import grp
except ImportError: # pragma: no cover
grp = NotImplemented
try:
from httplib import (
OK, INTERNAL_SERVER_ERROR, CREATED, ACCEPTED, CONFLICT)
except ImportError: # pragma: no cover
from http.client import (
OK, INTERNAL_SERVER_ERROR, CREATED, ACCEPTED, CONFLICT)
from psutil import disk_usage
from twisted.internet import reactor, threads
from twisted.internet.defer import Deferred, DeferredList, succeed
from twisted.web._newclient import (
ResponseNeverReceived, RequestTransmissionFailed)
import treq
from pyfarm.core.enums import WINDOWS, INTEGER_TYPES, STRING_TYPES, WorkState
from pyfarm.agent.config import config
from pyfarm.agent.logger import getLogger
from pyfarm.agent.http.core.client import get, post, http_retry_delay
from pyfarm.agent.utility import remove_file, remove_directory
from pyfarm.jobtypes.core.log import STDOUT, STDERR, logpool
from pyfarm.jobtypes.core.process import ReplaceEnvironment, ProcessProtocol
USER_GROUP_TYPES = tuple(
list(STRING_TYPES) + list(INTEGER_TYPES) + [type(None)])
ITERABLE_CONTAINERS = (list, tuple, set)
logcache = getLogger("jobtypes.cache")
logger = getLogger("jobtypes.core")
logfile = getLogger("jobtypes.log")
ProcessData = namedtuple(
"ProcessData", ("protocol", "started", "stopped"))
class InsufficientSpaceError(Exception):
pass
class Cache(object):
"""Internal methods for caching job types"""
cache = {}
JOBTYPE_VERSION_URL = \
"%(master_api)s/jobtypes/%(name)s/versions/%(version)s"
CACHE_DIRECTORY = config.get("jobtype_cache_directory", "")
if not CACHE_DIRECTORY: # pragma: no cover
CACHE_DIRECTORY = None # make sure it's None
logger.warning("Job type cache directory has been disabled.")
else:
try:
os.makedirs(CACHE_DIRECTORY)
except OSError as e: # pragma: no cover
if e.errno != EEXIST:
logger.error(
"Failed to create %r. Job type caching is "
"now disabled.", CACHE_DIRECTORY)
raise
else:
logger.info("Created job type cache directory %r", CACHE_DIRECTORY)
logger.debug("Job type cache directory is %r", CACHE_DIRECTORY)
@classmethod
def _download_jobtype(cls, name, version):
"""
Downloads the job type specified in ``assignment``. This
method will pass the response it receives to :meth:`_cache_jobtype`
however failures will be retried.
"""
logger.debug("Downloading job type %r version %s", name, version)
url = str(cls.JOBTYPE_VERSION_URL % {
"master_api": config["master_api"],
"name": name, "version": version})
result = Deferred()
download = lambda *_: \
get(url,
callback=result.callback,
errback=lambda *_: reactor.callLater(http_retry_delay(),
download))
download()
return result
@classmethod
def _cache_filepath(cls, cache_key, classname, version):
return str(join(
cls.CACHE_DIRECTORY, "%s_%s_v%s.py" % (
cache_key, classname, version)))
@classmethod
def _cache_key(cls, assignment):
return assignment["jobtype"]["name"], assignment["jobtype"]["version"]
@classmethod
def _jobtype_download_complete(cls, response, cache_key):
# Server is offline or experiencing issues right
# now so we should retry the request.
if response.code >= INTERNAL_SERVER_ERROR:
logger.debug(
"Could not download jobtype because of internal server error.")
return reactor.callLater(
http_retry_delay(),
response.request.retry)
downloaded_data = response.json()
if not config["jobtype_enable_cache"]:
logger.debug("Jobtype cache is disabled, loading the jobtype.")
return cls._load_jobtype(downloaded_data, None)
else:
# When the download is complete, cache the results
logger.debug("Caching the jobtype locally.")
caching = cls._cache_jobtype(cache_key, downloaded_data)
caching.addCallback(
lambda result: cls._load_jobtype(*result))
return caching
@classmethod
def _cache_jobtype(cls, cache_key, jobtype):
"""
Once the job type is downloaded this classmethod is called
        to store it on disk. In the rare event that we fail to write it
to disk, we store it in memory instead.
"""
if isinstance(cache_key, tuple):
cache_key = cache_key[0]
assert isinstance(cache_key, STRING_TYPES)
assert isinstance(jobtype, dict)
filename = cls._cache_filepath(
cache_key, jobtype["classname"], jobtype["version"])
success = Deferred()
jobtype = jobtype.copy()
def write_to_disk(filename):
parent_dir = dirname(filename)
try:
os.makedirs(parent_dir)
except (IOError, OSError) as e: # pragma: no cover
if e.errno != EEXIST:
logger.error("Failed to create %s: %s", parent_dir, e)
else:
logger.debug("Created %s", parent_dir)
if isfile(filename): # pragma: no cover
logcache.debug("%s is already cached on disk", filename)
jobtype.pop("code", None)
return jobtype, filename
try:
with open(filename, "w") as stream:
stream.write(jobtype["code"])
# If the above fails, use a temp file instead
except (IOError, OSError) as e: # pragma: no cover
fd, tmpfilepath = tempfile.mkstemp(suffix=".py")
logcache.warning(
"Failed to write %s, using %s instead: %s",
filename, tmpfilepath, e)
with os.fdopen(fd, "w") as stream:
stream.write(jobtype["code"])
jobtype.pop("code", None)
return jobtype, tmpfilepath
else:
logger.debug(
"Wrote job type %s version %s to %s",
jobtype["name"], jobtype["version"], filename)
jobtype.pop("code", None)
return jobtype, filename
def written_to_disk(results):
jobtype, filename = results
cls.cache[cache_key] = (jobtype, filename)
logcache.info("Created cache for %r at %s", cache_key, filename)
success.callback((jobtype, filename))
def failed_to_write_to_disk(error): # pragma: no cover
logcache.error(
"Failed to write job type cache to disk, will use "
"memory instead: %s", error)
# The code exists in the job type because it's
# only removed on success.
cls.cache[cache_key] = (jobtype, None)
success.callback((jobtype, None))
# Defer the write process to a thread so we don't
# block the reactor if the write is slow
logger.debug(
"Caching job type %s version %s to %s",
jobtype["classname"], jobtype.get("version", "?"), filename)
writer = threads.deferToThread(write_to_disk, filename)
writer.addCallbacks(written_to_disk, failed_to_write_to_disk)
return success
@classmethod
def _module_for_jobtype(cls, jobtype):
return "pyfarm.jobtypes.cached.%s%s%s" % (
jobtype["classname"], jobtype["name"], jobtype["version"])
@classmethod
def _load_jobtype(cls, jobtype, filepath):
def load_jobtype(jobtype_data, path):
module_name = cls._module_for_jobtype(jobtype_data)
# Create or load the module
if filepath is not None:
logger.debug("Attempting to load module from file path %s",
filepath)
try:
module = imp.load_source(module_name, path)
except Exception as e:
type = sys.exc_info()[0]
value = sys.exc_info()[1]
logger.error("Importing module from jobtype file failed: "
"%s, value: %s", type, value)
raise
else:
logcache.warning(
"Loading (%s, %s) directly from memory",
jobtype_data["name"], jobtype_data["version"])
module = imp.new_module(module_name)
exec jobtype_data["code"] in module.__dict__
sys.modules[module_name] = module
logger.debug("Returning class %s from module",
jobtype_data["classname"])
return getattr(module, jobtype_data["classname"])
# Load the job type itself in a thread so we limit disk I/O
# and other blocking issues in the main thread.
return threads.deferToThread(load_jobtype, jobtype, filepath)
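# --- Illustrative sketch, not part of the original module -------------------
# Shows how a downloaded job type's import name is derived by the Cache class
# above. The dictionary is a made-up example of the metadata the master would
# return; only the keys used by _module_for_jobtype() are included.
def _example_cached_module_name():
    example_jobtype = {
        "classname": "ExampleJobType", "name": "example", "version": 2}
    # Returns "pyfarm.jobtypes.cached.ExampleJobTypeexample2"
    return Cache._module_for_jobtype(example_jobtype)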
class Process(object):
"""Methods related to process control and management"""
logging = {}
def __init__(self):
self.start_called = False
self.stop_called = False
def _before_spawn_process(self, command, protocol):
logger.debug(
"%r._before_spawn_process(%r, %r)", self, command, protocol)
self.before_spawn_process(command, protocol)
def _spawn_twisted_process(
self, command, process_protocol, kwargs):
"""
Handles the spawning of the process itself using
:func:`reactor.spawnProcess`.
        :param command:
            The command object describing what should be spawned.
        :param process_protocol:
            The :class:`ProcessProtocol` instance which will manage the
            spawned process.
        :param kwargs:
            Additional keyword arguments passed on to ``spawnProcess``.
"""
self._before_spawn_process(command, process_protocol)
        # The way Twisted handles the env keyword varies by platform. To
        # keep the behaviour consistent we always pass env=None and instead
        # swap in the desired environment ourselves with ReplaceEnvironment.
kwargs.setdefault("env", None)
if kwargs["env"] is not None:
raise RuntimeError(
"The `env` keyword should always be set to None.")
with ReplaceEnvironment(command.env):
try:
reactor.spawnProcess(process_protocol, command.command, **kwargs)
except Exception as e:
logger.error("Exception on starting process: %s", e)
self.processes.pop(process_protocol.uuid).stopped.callback(None)
self.failed_processes.add((process_protocol, e))
# If there are no processes running at this point, we assume
# the assignment is finished
if len(self.processes) == 0:
logger.error("There was at least one failed process in the "
"assignment %s", self)
for task in self.assignment["tasks"]:
if task["id"] not in self.finished_tasks:
self.set_task_state(task, WorkState.FAILED)
else:
logger.info(
"Task %r is already in finished tasks, not setting "
"state to %s", task["id"], WorkState.FAILED)
self.stopped_deferred.callback(None)
def _start(self):
"""
The internal method that gets called to start the job type. Usually
        this simply calls :meth:`start`, though more advanced job types may
        override both this method and :meth:`start`.
.. warning::
Read the source code before overriding this method on your
own. This method sets a couple of instance variables and returns
a tuple that is relied upon elsewhere.
"""
# Make sure _start() is not called twice
if self.start_called:
raise RuntimeError("%s has already been started" % self)
log_path = self.get_csvlog_path(self.uuid)
logpool.open_log(self.uuid, log_path)
self.log_identifier = basename(log_path)
register_log_deferred = self._register_logfile_on_master(
self.log_identifier)
self.started_deferred = Deferred()
self.stopped_deferred = Deferred()
def start_processes(_):
logpool.log(self.uuid, "internal",
"Starting work on job %s, assignment of %s tasks." %
(self.assignment["job"]["title"],
len(self.assignment["tasks"])))
self._before_start()
logger.debug("%r.start()", self.__class__.__name__)
try:
self.start()
self.start_called = True
logger.debug("Collecting started deferreds from spawned "
"processes")
if not self.processes:
logger.warning(
"No processes have been started, firing deferreds "
"immediately.")
self.started_deferred.callback(None)
self.stopped_deferred.callback(None)
else:
logger.debug("Making deferred list for %s started "
"processes", len(self.processes))
processes_deferred = DeferredList(
[process.started for process in self.processes.values()])
processes_deferred.addCallback(
lambda x: self.started_deferred.callback(x))
except Exception as e:
self.started_deferred.errback(e)
self.stopped_deferred.errback(e)
register_log_deferred.addCallback(start_processes)
register_log_deferred.addErrback(
lambda x: self.started_deferred.errback(x))
register_log_deferred.addErrback(
lambda x: self.stopped_deferred.errback(x))
return self.started_deferred, self.stopped_deferred
def _stop(self):
if self.stop_called:
raise RuntimeError("%s has already been stopped" % self)
return self.stop()
def _before_start(self):
logger.debug("%r._before_start()", self)
self.before_start()
def _process_started(self, protocol):
"""
Called by :meth:`.ProcessProtocol.connectionMade` when a process has
started running.
"""
logger.debug("%r._process_started(%r)", self, protocol)
logpool.log(self.uuid, "internal", "Started %r" % protocol,
protocol.pid)
process_data = self.processes[protocol.uuid]
process_data.started.callback(protocol)
if not self.stop_called:
self.process_started(protocol)
else:
self.stop()
def _process_stopped(self, protocol, reason):
"""
Internal implementation for :meth:`process_stopped`.
If ``--capture-process-output`` was set when the agent was launched
all standard output from the process will be sent to the stdout
of the agent itself. In all other cases we send the data to the
logger pool so it can be stored in a file without blocking the
event loop.
"""
logger.info("%r stopped (code: %r)", protocol, reason.value.exitCode)
process_data = self.processes.pop(protocol.uuid)
try:
successful = self.is_successful(protocol, reason)
except Exception as e:
message = ("Exception caught from is_successful(): %r. "
"Assuming not successful." % e)
logger.error(message)
self._log(message)
successful = False
if successful:
logpool.log(
self.uuid, "internal",
"Process has terminated successfully, code %s" %
reason.value.exitCode, protocol.pid)
else:
self.failed_processes.add((protocol, reason))
logpool.log(
self.uuid, "internal",
"Process has not terminated successfully, code %s" %
reason.value.exitCode, protocol.pid)
try:
self.process_stopped(protocol, reason)
except Exception as e:
logger.error("Exception caught from process_stopped: %s", e)
process_data.stopped.callback(reason)
# If there are no processes running at this point, we assume
# the assignment is finished
if len(self.processes) == 0:
self.stopped_deferred.callback(None)
return succeed([])
def _spawn_process(self, command):
"""
Starts one child process using input from :meth:`command_data`.
Job types should never start child processes through any other
means. The only exception to this rule is code that resides in
:meth:`prepare_for_job`, which should use
:meth:`spawn_persistent_job_process` instead.
:raises OSError:
Raised if `working_dir` was provided but the provided
path does not exist
:raises EnvironmentError:
Raised if an attempt is made to change the user or
group without root access. This error will only occur on
Linux or Unix platforms.
"""
process_protocol = self.PROCESS_PROTOCOL(self)
process_protocol.id = getattr(command, "id", None)
if not isinstance(process_protocol, ProcessProtocol):
raise TypeError("Expected ProcessProtocol for `protocol`")
# The first argument should always be the command name by convention.
# Under Windows, this needs to be the whole path, under POSIX only the
# basename.
if WINDOWS:
arguments = [command.command] + list(command.arguments)
else:
arguments = [basename(command.command)] + list(command.arguments)
# WARNING: `env` should always be None to ensure the same operation
# of the environment setup across platforms. See Twisted's
# documentation for more information on why `env` should be None:
# http://twistedmatrix.com/documents/current/api/
# twisted.internet.interfaces.IReactorProcess.spawnProcess.html
kwargs = {"args": arguments, "env": None}
uid, gid = self.get_uid_gid(command.user, command.group)
if uid is not None:
kwargs.update(uid=uid)
if gid is not None:
kwargs.update(gid=gid)
# Capture the protocol instance so we can keep track
# of the process we're about to spawn.
self.processes[process_protocol.uuid] = ProcessData(
protocol=process_protocol, started=Deferred(), stopped=Deferred())
return self._spawn_twisted_process(command, process_protocol, kwargs)
def _process_output(self, protocol, output, stream):
"""
Called by :meth:`.ProcessProtocol.outReceived` and
:meth:`.ProcessProtocol.errReceived` whenever output is produced
by a process. This method will wire up the proper calls under the
hood to process the output.
"""
if stream == STDOUT:
line_fragments = self._stdout_line_fragments
line_handler = self.handle_stdout_line
elif stream == STDERR:
line_fragments = self._stderr_line_fragments
line_handler = self.handle_stderr_line
else:
raise ValueError("Expected STDOUT or STDERR for `stream`")
self.process_output(protocol, output, line_fragments, line_handler)
def _has_running_processes(self):
"""
        Internal function to determine whether the batch represented by this
instance still has running child processes.
"""
for process in self.processes.values():
if process.protocol.running():
return True
return False
def _register_logfile_on_master(self, log_path):
def post_logfile(task, log_path, post_deferred=None, num_retry_errors=0,
delay=0):
deferred = post_deferred or Deferred()
url = "%s/jobs/%s/tasks/%s/attempts/%s/logs/" % (
config["master_api"], self.assignment["job"]["id"], task["id"],
task["attempt"])
data = {"identifier": log_path,
"agent_id": self.node()["id"]}
post_func = partial(
post, url, data=data,
callback=lambda x: result_callback(task, log_path, deferred, x),
errback=lambda x: error_callback(task, log_path, deferred,
num_retry_errors, x))
reactor.callLater(delay, post_func)
return deferred
def result_callback(task, log_path, deferred, response):
if 500 <= response.code < 600:
delay = http_retry_delay()
logger.error(
"Server side error while registering log file %s for "
"task %s (frame %s) in job %s (id %s), status code: %s. "
"Retrying in %s seconds",
log_path, task["id"], task["frame"],
self.assignment["job"]["title"],
self.assignment["job"]["id"], response.code, delay)
post_logfile(task, log_path, post_deferred=deferred,
delay=delay)
# The server will return CONFLICT if we try to register a logfile
# twice.
elif response.code not in [OK, CONFLICT, CREATED]:
# Nothing else we could do about that, this is
# a problem on our end.
logger.error(
"Could not register logfile %s for task %s (frame %s) in "
"job %s (id %s), status code: %s. This is a client side "
"error, giving up.",
log_path, task["id"], task["frame"],
self.assignment["job"]["title"],
self.assignment["job"]["id"], response.code)
deferred.errback(None)
else:
logger.info("Registered logfile %s for task %s on master",
log_path, task["id"])
deferred.callback(None)
def error_callback(task, log_path, deferred, num_retry_errors,
failure_reason):
if num_retry_errors > config["broken_connection_max_retry"]:
logger.error(
"Error while registering logfile %s for task %s on master. "
"Maximum number of retries reached. Not retrying the "
"request.", log_path, task["id"])
deferred.errback(None)
else:
if (failure_reason.type in
(ResponseNeverReceived, RequestTransmissionFailed)):
logger.debug(
"Error while registering logfile %s for task %s on "
"master: %s, retrying immediately",
log_path, task["id"], failure_reason.type.__name__)
post_logfile(task, log_path, post_deferred=deferred)
else:
delay = http_retry_delay()
logger.error(
"Error while registering logfile %s for task %s on "
"master: %r, retrying in %s seconds.",
log_path, task["id"], failure_reason, delay)
post_logfile(task, log_path, post_deferred=deferred,
delay=delay)
deferreds = []
for task in self.assignment["tasks"]:
deferreds.append(post_logfile(task, log_path))
return DeferredList(deferreds)
def _upload_logfile(self):
path = join(config["jobtype_task_logs"], self.log_identifier)
url = "%s/jobs/%s/tasks/%s/attempts/%s/logs/%s/logfile" % (
config["master_api"], self.assignment["job"]["id"],
self.assignment["tasks"][0]["id"],
self.assignment["tasks"][0]["attempt"],
self.log_identifier)
upload_deferred = Deferred()
def upload(url, log_identifier, delay=0):
logfile = open(path, "rb")
if delay != 0:
reactor.callLater(delay, upload, url,
log_identifier=log_identifier)
else:
# FIXME persistent=False is a workaround to help with some
# problems in unit testing.
deferred = treq.put(url=url, data=logfile,
headers={"Content-Type": ["text/csv"]},
persistent=False)
deferred.addCallback(lambda x: result_callback(
url, log_identifier, x))
deferred.addErrback(lambda x: error_callback(
url, log_identifier, x))
def result_callback(url, log_identifier, response):
if 500 <= response.code < 600:
delay = http_retry_delay()
                logger.error(
                    "Server side error while uploading log file %s, "
                    "status code: %s. Retrying in %s seconds",
                    log_identifier, response.code, delay)
upload(url, log_identifier, delay=delay)
elif response.code not in [OK, CREATED, ACCEPTED]:
# Nothing else we could do about that, this is
# a problem on our end.
logger.error(
"Could not upload logfile %s status code: %s. "
"This is a client side error, giving up.",
log_identifier, response.code)
try:
upload_deferred.errback(ValueError(
"Bad return code on uploading logfile: %s"
% response.code))
except Exception as e:
logger.error(
"Caught exception calling upload_deferred.errback: %s",
e)
else:
                logger.info("Uploaded logfile %s to master",
log_identifier)
try:
upload_deferred.callback(None)
except Exception as e:
logger.error(
"Caught exception calling upload_deferred.callback: %s",
e)
def error_callback(url, log_identifier, failure_reason):
if (failure_reason.type in
(ResponseNeverReceived, RequestTransmissionFailed)):
logger.debug(
"Error while uploading logfile %s to master: "
"%s, retrying immediately",
log_identifier, failure_reason.type.__name__)
upload(url, log_identifier)
else:
delay = http_retry_delay()
logger.error(
"Error while uploading logfile %s to master: "
"%r, retrying in %s seconds.",
log_identifier, failure_reason, delay)
upload(url, log_identifier, delay=delay)
logger.info("Uploading log file %s to master, URL %r",
self.log_identifier, url)
upload(url, self.log_identifier)
return upload_deferred
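# --- Illustrative sketch, not part of the original class --------------------
# A compressed restatement of the retry pattern used by
# _register_logfile_on_master() and _upload_logfile() above: 5xx responses
# re-schedule the request after http_retry_delay(), other unexpected codes
# fail the Deferred and success fires it. `issue_request` is a hypothetical
# callable performing one request and calling back with the response.
def _example_retry_request(issue_request, deferred=None):
    deferred = deferred or Deferred()
    def handle_response(response):
        if 500 <= response.code < 600:
            reactor.callLater(http_retry_delay(),
                              _example_retry_request, issue_request, deferred)
        elif response.code not in (OK, CREATED, ACCEPTED):
            deferred.errback(
                ValueError("Unrecoverable status code %s" % response.code))
        else:
            deferred.callback(response)
    issue_request(handle_response)
    return deferred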
class System(object):
# overridden in the job type
_tempdirs = NotImplemented
uuid = NotImplemented
def _get_uid_gid_value(self, value, value_name, func_name,
module, module_name):
"""
Internal function which handles both user name and group conversion.
"""
# This platform does not implement the module
if module is NotImplemented:
logger.warning(
"This platform does not implement the %r module, skipping "
"%s()", module_name, func_name)
# Convert a user/group string to an integer
elif isinstance(value, STRING_TYPES):
try:
if module_name == "pwd":
return pwd.getpwnam(value).pw_uid
elif module_name == "grp":
return grp.getgrnam(value).gr_gid
else:
raise ValueError(
"Internal error, failed to get module to use for "
"conversion. Was given %r" % module)
except KeyError:
logger.error(
"Failed to convert %s to a %s",
value, func_name.split("_")[1])
if not config.get("jobtype_ignore_id_mapping_errors"):
raise
# Verify that the provided user/group string is real
elif isinstance(value, INTEGER_TYPES):
try:
if module_name == "pwd":
pwd.getpwuid(value)
elif module_name == "grp":
grp.getgrgid(value)
else:
raise ValueError(
"Internal error, failed to get module to use for "
"conversion. Was given %r" % module)
# Seems to check out, return the original value
return value
except KeyError:
logger.error(
"%s %s does not seem to exist", value_name, value)
if not config.get("jobtype_ignore_id_mapping_errors"):
raise
else:
raise TypeError(
"Expected an integer or string for `%s`" % value_name)
def _remove_directories(self, directories, retry_on_exit=True):
"""
        Removes multiple directories at once, retrying on exit
for each failure.
"""
assert isinstance(directories, (list, tuple, set))
for directory in directories:
remove_directory(
directory, raise_=False, retry_on_exit=retry_on_exit)
def _remove_tempdirs(self):
"""
Iterates over all temporary directories in ``_tempdirs`` and removes
them from disk. This work will be done in a thread so it does not
block the reactor.
"""
assert isinstance(self._tempdirs, set)
if not self._tempdirs:
return
reactor.callInThread(self._remove_directories, self._tempdirs.copy())
self._tempdirs.clear()
def _cleanup_system_temp_dir(self, minimum_age=604800):
"""
        Cleans up old files from the system's temp directory to reclaim space.
        Only files last accessed or modified more than ``minimum_age`` seconds
        ago (604800 seconds, one week, by default) are removed. Files that
        cannot be deleted, for example because of missing permissions, are
        silently ignored.
.. warning::
This will delete files from the system temp directory.
"""
tempdir = tempfile.gettempdir()
# followlinks=False is the default for os.walk. I am specifying it
# explicitly here to make it more obvious. Setting this to True
# instead might make us delete files outside of tempdir, if there is
# a symlink in there somewhere.
for root, dirs, files in os.walk(tempdir, topdown=True,
followlinks=False):
# Don't delete our own temp files
if root == dirname(config.tempdir):
dirs[:] = [] # Do not iterate over sub directories in this root
continue
for filename in files:
fullpath = join(root, filename)
stat_result = os.stat(fullpath)
timestamp = max(stat_result.st_atime, stat_result.st_mtime)
if timestamp + minimum_age < time.time():
logger.debug("Deleting tempfile %s", fullpath)
remove_file(fullpath, retry_on_exit=False, raise_=False)
def _ensure_free_space_in_temp_dir(self, tempdir, space, minimum_age=None):
"""
Ensures that at least space bytes of data can be stored on the volume
on which tempdir is located, deleting files from tempdir if necessary.
.. warning::
This will delete files in tempdir to reclaim storage space.
:raises InsufficientSpaceError:
Raised if enough space cannot be claimed.
"""
assert isinstance(tempdir, STRING_TYPES), "Expected string for tempdir"
try:
space = int(space)
except (ValueError, TypeError):
raise TypeError(
"Could not convert value %r for `space` in "
"_ensure_free_space_in_temp_dir() to an integer." % space)
try:
os.makedirs(tempdir)
except OSError as e: # pragma: no cover
if e.errno != EEXIST:
raise
if disk_usage(tempdir).free >= space:
return
tempfiles = []
# followlinks=False is the default for os.walk. I am specifying it
# explicitly here to make it more obvious. Setting this to True
# instead might make us delete files outside of tempdir, if there is
# a symlink in there somewhere.
for root, dirs, files in os.walk(tempdir, followlinks=False):
for filename in files:
fullpath = join(root, filename)
atime = os.stat(fullpath).st_atime
tempfiles.append({"filepath": fullpath, "atime": atime})
tempfiles.sort(key=lambda x: x["atime"])
while disk_usage(tempdir).free < space:
if not tempfiles:
raise InsufficientSpaceError("Cannot free enough space in temp "
"directory %s" % tempdir)
element = tempfiles.pop(0)
if (not minimum_age or
os.stat(element["filepath"]).st_mtime + minimum_age <
time.time()):
logger.debug("Deleting tempfile %s", element["filepath"])
remove_file(
element["filepath"], retry_on_exit=False, raise_=False)
else: # pragma: no cover
logger.debug("Not deleting tempfile %s, it is newer than %s "
"seconds", element["filepath"], minimum_age)
def _log(self, message):
"""
Log a message from the jobtype itself to the process' log file.
Useful for debugging jobtypes.
"""
assert isinstance(self.uuid, UUID)
logpool.log(self.uuid, "jobtype", message)
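# --- Illustrative sketch, not part of the original class --------------------
# Demonstrates the kind of lookup _get_uid_gid_value() above performs on
# POSIX systems: a user name is converted to a numeric id through the pwd
# module, while an integer id is only validated. The "nobody" account is
# simply a common example and is not referenced by the original code.
def _example_uid_lookup():
    if pwd is NotImplemented:  # e.g. on Windows
        return None
    return pwd.getpwnam("nobody").pw_uid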
class TypeChecks(object):
"""
Helper static methods for performing type checks on
input arguments.
"""
@staticmethod
def _check_expandvars_inputs(value, environment):
"""Checks input arguments for :meth:`expandvars`"""
if not isinstance(value, STRING_TYPES):
raise TypeError("Expected a string for `value`")
if environment is not None and not isinstance(environment, dict):
raise TypeError("Expected None or a dictionary for `environment`")
@staticmethod
def _check_map_path_inputs(path):
"""Checks input arguments for :meth:`map_path`"""
if not isinstance(path, STRING_TYPES):
raise TypeError("Expected string for `path`")
@staticmethod
def _check_csvlog_path_inputs(protocol_uuid, now):
"""Checks input arguments for :meth:`get_csvlog_path`"""
if not isinstance(protocol_uuid, UUID):
raise TypeError("Expected UUID for `protocol_uuid`")
if now is not None and not isinstance(now, datetime):
raise TypeError("Expected None or datetime for `now`")
@staticmethod
def _check_command_list_inputs(cmdlist):
"""Checks input arguments for :meth:`get_command_list`"""
if not isinstance(cmdlist, (tuple, list)):
raise TypeError("Expected tuple or list for `cmdlist`")
@staticmethod
def _check_set_states_inputs(tasks, state):
"""Checks input arguments for :meth:`set_states`"""
if not isinstance(tasks, ITERABLE_CONTAINERS):
raise TypeError("Expected tuple, list or set for `tasks`")
if state not in WorkState:
raise ValueError("Expected `state` to be in %s" % list(WorkState))
| 39.797468
| 81
| 0.577873
|
0d6e88e22e3700d2781e5c2bde763e0f0ae75829
| 2,412
|
py
|
Python
|
philips_lights/light_driver.py
|
LBNL-ETA/LPDM-Drivers
|
0190ecb1348b10d5fb7c5b60ca30ebbbbebe094e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
philips_lights/light_driver.py
|
LBNL-ETA/LPDM-Drivers
|
0190ecb1348b10d5fb7c5b60ca30ebbbbebe094e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
philips_lights/light_driver.py
|
LBNL-ETA/LPDM-Drivers
|
0190ecb1348b10d5fb7c5b60ca30ebbbbebe094e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import requests
from datetime import datetime, timedelta
class Light_Driver(object):
def __init__(self, url, auth, switch_point_low = 0.05, switch_point_high = 0.50):
self.url = url
self.auth = auth
self.command_template = "<?xml version=\"1.0\" encoding=\"utf-8\"?><LG_CMD><CMD><CMDID>SET_MODE_BC</CMDID><MODE>{mode}</MODE></CMD></LG_CMD>\n\n"
self.switch_point_low = switch_point_low
self.switch_point_high = switch_point_high
self.last_set_time = datetime.now() - timedelta(minutes=10)
self.time_between_commands = timedelta(minutes=5)
self.last_command = None
def get_charge_command(self):
return self.command_template.format(mode="CHARGE")
def get_discharge_command(self):
return self.command_template.format(mode="DISCHARGE")
def get_grid_command(self):
return self.command_template.format(mode="GRID")
def send_command(self, command):
cmd = {"data": command}
response = requests.post(self.url, data = cmd, auth=self.auth)
return response
def set_to_charge(self):
cmd = self.get_charge_command()
return self.send_command(cmd)
def set_to_discharge(self):
cmd = self.get_discharge_command()
return self.send_command(cmd)
def set_to_grid(self):
cmd = self.get_grid_command()
return self.send_command(cmd)
def set_power_availablity(self, power_level):
"""
Expects power_level to be between 0 and 1.
        The light is set to charge if the level is below the low switch
        point, to grid if it falls between the two switch points, and to
        discharge if it is above the high switch point.
"""
command_to_run = None
if power_level < self.switch_point_low:
command_to_run = self.set_to_charge
elif self.switch_point_low <= power_level <= self.switch_point_high:
command_to_run = self.set_to_grid
else:
command_to_run = self.set_to_discharge
now = datetime.now()
if ((now - self.last_set_time) < self.time_between_commands) or (command_to_run == self.last_command):
return
self.last_command = command_to_run
self.last_set_time = now
return command_to_run()
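# --- Illustrative usage, not part of the original driver --------------------
# Builds the XML command strings without contacting a light; the URL and
# credentials below are placeholders, not values from the real deployment.
def _example_commands():
    driver = Light_Driver("http://light.example/cmd", ("user", "secret"))
    return (driver.get_charge_command(),
            driver.get_grid_command(),
            driver.get_discharge_command())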
| 36.545455
| 153
| 0.630182
|
f352dc5b6fa085afe9877abaefc121366e99785a
| 10,418
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20170601/get_network_interface.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20170601/get_network_interface.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20170601/get_network_interface.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceResult',
'AwaitableGetNetworkInterfaceResult',
'get_network_interface',
]
@pulumi.output_type
class GetNetworkInterfaceResult:
"""
A network interface in a resource group.
"""
def __init__(__self__, dns_settings=None, enable_accelerated_networking=None, enable_ip_forwarding=None, etag=None, id=None, ip_configurations=None, location=None, mac_address=None, name=None, network_security_group=None, primary=None, provisioning_state=None, resource_guid=None, tags=None, type=None, virtual_machine=None):
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if enable_accelerated_networking and not isinstance(enable_accelerated_networking, bool):
raise TypeError("Expected argument 'enable_accelerated_networking' to be a bool")
pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
if enable_ip_forwarding and not isinstance(enable_ip_forwarding, bool):
raise TypeError("Expected argument 'enable_ip_forwarding' to be a bool")
pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_configurations and not isinstance(ip_configurations, list):
raise TypeError("Expected argument 'ip_configurations' to be a list")
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if mac_address and not isinstance(mac_address, str):
raise TypeError("Expected argument 'mac_address' to be a str")
pulumi.set(__self__, "mac_address", mac_address)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_security_group and not isinstance(network_security_group, dict):
raise TypeError("Expected argument 'network_security_group' to be a dict")
pulumi.set(__self__, "network_security_group", network_security_group)
if primary and not isinstance(primary, bool):
raise TypeError("Expected argument 'primary' to be a bool")
pulumi.set(__self__, "primary", primary)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_machine and not isinstance(virtual_machine, dict):
raise TypeError("Expected argument 'virtual_machine' to be a dict")
pulumi.set(__self__, "virtual_machine", virtual_machine)
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']:
"""
The DNS settings in network interface.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter(name="enableAcceleratedNetworking")
def enable_accelerated_networking(self) -> Optional[bool]:
"""
If the network interface is accelerated networking enabled.
"""
return pulumi.get(self, "enable_accelerated_networking")
@property
@pulumi.getter(name="enableIPForwarding")
def enable_ip_forwarding(self) -> Optional[bool]:
"""
Indicates whether IP forwarding is enabled on this network interface.
"""
return pulumi.get(self, "enable_ip_forwarding")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
A list of IPConfigurations of the network interface.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> Optional[str]:
"""
The MAC address of the network interface.
"""
return pulumi.get(self, "mac_address")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
"""
The reference of the NetworkSecurityGroup resource.
"""
return pulumi.get(self, "network_security_group")
@property
@pulumi.getter
def primary(self) -> Optional[bool]:
"""
Gets whether this is a primary network interface on a virtual machine.
"""
return pulumi.get(self, "primary")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the network interface resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualMachine")
def virtual_machine(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of a virtual machine.
"""
return pulumi.get(self, "virtual_machine")
class AwaitableGetNetworkInterfaceResult(GetNetworkInterfaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkInterfaceResult(
dns_settings=self.dns_settings,
enable_accelerated_networking=self.enable_accelerated_networking,
enable_ip_forwarding=self.enable_ip_forwarding,
etag=self.etag,
id=self.id,
ip_configurations=self.ip_configurations,
location=self.location,
mac_address=self.mac_address,
name=self.name,
network_security_group=self.network_security_group,
primary=self.primary,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type,
virtual_machine=self.virtual_machine)
def get_network_interface(expand: Optional[str] = None,
network_interface_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceResult:
"""
A network interface in a resource group.
:param str expand: Expands referenced resources.
:param str network_interface_name: The name of the network interface.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['networkInterfaceName'] = network_interface_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20170601:getNetworkInterface', __args__, opts=opts, typ=GetNetworkInterfaceResult).value
return AwaitableGetNetworkInterfaceResult(
dns_settings=__ret__.dns_settings,
enable_accelerated_networking=__ret__.enable_accelerated_networking,
enable_ip_forwarding=__ret__.enable_ip_forwarding,
etag=__ret__.etag,
id=__ret__.id,
ip_configurations=__ret__.ip_configurations,
location=__ret__.location,
mac_address=__ret__.mac_address,
name=__ret__.name,
network_security_group=__ret__.network_security_group,
primary=__ret__.primary,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type,
virtual_machine=__ret__.virtual_machine)
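# --- Illustrative usage, not part of the generated SDK ----------------------
# Sketches how this data source is typically consumed from inside a Pulumi
# program; the resource group and interface names are placeholders only.
def _example_lookup():
    nic = get_network_interface(
        resource_group_name="example-rg",
        network_interface_name="example-nic")
    return nic.mac_address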
| 39.313208
| 329
| 0.665291
|
45a129aed8945874ae551e4b8104eba9ddf60e68
| 3,551
|
py
|
Python
|
sendSMSSkillLambda/package/ask_sdk_model/services/reminder_management/event.py
|
shneydor/aws-alexa-lambda-workshop
|
0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4
|
[
"Apache-2.0"
] | null | null | null |
sendSMSSkillLambda/package/ask_sdk_model/services/reminder_management/event.py
|
shneydor/aws-alexa-lambda-workshop
|
0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4
|
[
"Apache-2.0"
] | null | null | null |
sendSMSSkillLambda/package/ask_sdk_model/services/reminder_management/event.py
|
shneydor/aws-alexa-lambda-workshop
|
0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4
|
[
"Apache-2.0"
] | 1
|
2019-10-11T17:15:20.000Z
|
2019-10-11T17:15:20.000Z
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.services.reminder_management.status import Status
class Event(object):
"""
:param status:
:type status: (optional) ask_sdk_model.services.reminder_management.status.Status
:param alert_token:
:type alert_token: (optional) str
"""
deserialized_types = {
'status': 'ask_sdk_model.services.reminder_management.status.Status',
'alert_token': 'str'
} # type: Dict
attribute_map = {
'status': 'status',
'alert_token': 'alertToken'
} # type: Dict
def __init__(self, status=None, alert_token=None):
# type: (Optional[Status], Optional[str]) -> None
"""
:param status:
:type status: (optional) ask_sdk_model.services.reminder_management.status.Status
:param alert_token:
:type alert_token: (optional) str
"""
self.__discriminator_value = None # type: str
self.status = status
self.alert_token = alert_token
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Event):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
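# --- Illustrative usage, not part of the generated model --------------------
# Shows the result of to_dict() for a minimal Event; the alert token below is
# a placeholder value.
def _example_event_dict():
    event = Event(status=None, alert_token="example-token")
    # -> {'status': None, 'alert_token': 'example-token'}
    return event.to_dict()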
| 31.149123
| 96
| 0.587722
|
e9d32363f7481c167186eea5676d6d8327c5938f
| 709
|
py
|
Python
|
virtual/bin/django-admin.py
|
KaranjaK/Personal-Gallery
|
bceb89c8bcea9203eccd2f59b24bfbce477ac6a9
|
[
"Unlicense",
"MIT"
] | null | null | null |
virtual/bin/django-admin.py
|
KaranjaK/Personal-Gallery
|
bceb89c8bcea9203eccd2f59b24bfbce477ac6a9
|
[
"Unlicense",
"MIT"
] | null | null | null |
virtual/bin/django-admin.py
|
KaranjaK/Personal-Gallery
|
bceb89c8bcea9203eccd2f59b24bfbce477ac6a9
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/home/kk/Documents/moringa_django/Personal-Gallery/virtual/bin/python3.8
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| 32.227273
| 80
| 0.732017
|
05ed1c7ed9ad31fb5ee36d86b1e0f9ca8f0d16cc
| 1,847
|
py
|
Python
|
coverage/IN_CTS/0445-COVERAGE-nir-opt-idiv-const-90/generate_cts_test.py
|
asuonpaa/ShaderTests
|
6a3672040dcfa0d164d313224446496d1775a15e
|
[
"Apache-2.0"
] | null | null | null |
coverage/IN_CTS/0445-COVERAGE-nir-opt-idiv-const-90/generate_cts_test.py
|
asuonpaa/ShaderTests
|
6a3672040dcfa0d164d313224446496d1775a15e
|
[
"Apache-2.0"
] | 47
|
2021-03-11T07:42:51.000Z
|
2022-03-14T06:30:14.000Z
|
coverage/IN_CTS/0445-COVERAGE-nir-opt-idiv-const-90/generate_cts_test.py
|
asuonpaa/ShaderTests
|
6a3672040dcfa0d164d313224446496d1775a15e
|
[
"Apache-2.0"
] | 4
|
2021-03-09T13:37:19.000Z
|
2022-02-25T07:32:11.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a CTS test.
This module/script is copied next to a specific test in your repository of bugs
to generate an Amber script test suitable for adding to the CTS.
In particular, the Amber script test is suitable for use with |add_amber_tests_to_cts.py|.
"""
import sys
from pathlib import Path
from gfauto import tool, util
def main() -> None:
# Checklist:
# - check output_amber
# - check short_description
# - check comment_text
# - check copyright_year
# - check extra_commands
bug_dir = util.norm_path(Path(__file__).absolute()).parent
tool.glsl_shader_job_crash_to_amber_script_for_google_cts(
source_dir=bug_dir / "reduced_manual",
output_amber=bug_dir / "cov-rcp-negative-int.amber",
work_dir=bug_dir / "work",
# One sentence, 58 characters max., no period, no line breaks.
short_description="A fragment shader that covers specific NIR code paths",
comment_text="""The test passes because the shader always writes red.""",
copyright_year="2021",
extra_commands=tool.AMBER_COMMAND_EXPECT_RED,
is_coverage_gap=True,
)
if __name__ == "__main__":
main()
sys.exit(0)
| 31.844828
| 90
| 0.717921
|
bcab9c036255da26d4d7516d122795351e952ab7
| 11,935
|
py
|
Python
|
calvin/runtime/south/transports/calvinip/twisted/twisted_transport.py
|
gabrielcercel/calvin-base
|
c0315f100643230d65aed1745e1c22df3e7a7c2c
|
[
"Apache-2.0"
] | 334
|
2015-06-04T15:14:28.000Z
|
2022-02-09T11:14:17.000Z
|
calvin/runtime/south/transports/calvinip/twisted/twisted_transport.py
|
gabrielcercel/calvin-base
|
c0315f100643230d65aed1745e1c22df3e7a7c2c
|
[
"Apache-2.0"
] | 89
|
2015-06-13T19:15:35.000Z
|
2019-12-03T19:23:20.000Z
|
calvin/runtime/south/transports/calvinip/twisted/twisted_transport.py
|
gabrielcercel/calvin-base
|
c0315f100643230d65aed1745e1c22df3e7a7c2c
|
[
"Apache-2.0"
] | 112
|
2015-06-06T19:16:54.000Z
|
2020-10-19T01:27:55.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities.calvin_callback import CalvinCB, CalvinCBClass
from calvin.utilities import calvinlogger
from calvin.utilities import certificate
from calvin.utilities import runtime_credentials
from calvin.runtime.south.transports.lib.twisted import base_transport
from twisted.protocols.basic import Int32StringReceiver
from twisted.internet import error
from twisted.internet import reactor, protocol, ssl, endpoints
_log = calvinlogger.get_logger(__name__)
from calvin.utilities import calvinconfig
_conf = calvinconfig.get()
def create_uri(ip, port):
return "%s://%s:%s" % ("calvinip", ip, port)
# Server
class TwistedCalvinServer(base_transport.CalvinServerBase):
"""
"""
def __init__(self, iface='', node_name=None, port=0, callbacks=None, *args, **kwargs):
super(TwistedCalvinServer, self).__init__(callbacks=callbacks)
self._iface = iface
self._node_name=node_name
self._port = port
self._addr = None
self._tcp_server = None
self._callbacks = callbacks
self._runtime_credentials = None
def start(self):
callbacks = {'connected': [CalvinCB(self._connected)]}
tcp_f = TCPServerFactory(callbacks)
runtime_to_runtime_security = _conf.get("security","runtime_to_runtime_security")
trusted_ca_certs = []
if runtime_to_runtime_security=="tls":
_log.debug("TwistedCalvinServer with TLS chosen")
try:
self._runtime_credentials = runtime_credentials.RuntimeCredentials(self._node_name)
                ca_cert_list_str = certificate.get_truststore_as_list_of_strings(certificate.TRUSTSTORE_TRANSPORT)
for ca_cert in ca_cert_list_str:
trusted_ca_certs.append(ssl.Certificate.loadPEM(ca_cert))
server_credentials_data = self._runtime_credentials.get_credentials()
server_credentials = ssl.PrivateCertificate.loadPEM(server_credentials_data)
except Exception as err:
_log.exception("Server failed to load credentials, err={}".format(err))
try:
self._tcp_server = reactor.listenSSL(self._port, tcp_f, server_credentials.options(*trusted_ca_certs), interface=self._iface)
except Exception as err:
_log.exception("Server failed listenSSL, err={}".format(err))
else:
_log.debug("TwistedCalvinServer without TLS chosen")
try:
self._tcp_server = reactor.listenTCP(self._port, tcp_f, interface=self._iface)
except error.CannotListenError:
_log.exception("Could not listen on port %s:%s", self._iface, self._port)
raise
except Exception as exc:
_log.exception("Failed when trying listening on port %s:%s", self._iface, self._port)
raise
self._port = self._tcp_server.getHost().port
self._callback_execute('server_started', self._port)
return self._port
def stop(self):
_log.debug("Stopping server %s", self._tcp_server)
def fire_callback(args):
_log.debug("Server stopped %s", self._tcp_server)
self._callback_execute('server_stopped')
def fire_errback(args):
_log.warning("Server did not stop as excpected %s", args)
self._callback_execute('server_stopped')
if self._tcp_server:
d = self._tcp_server.stopListening()
self._tcp_server = None
d.addCallback(fire_callback)
d.addErrback(fire_errback)
def is_listening(self):
return self._tcp_server is not None
def _connected(self, proto):
self._callback_execute('client_connected', create_uri(proto.transport.getPeer().host,
proto.transport.getPeer().port), proto)
class StringProtocol(CalvinCBClass, Int32StringReceiver):
def __init__(self, callbacks):
super(StringProtocol, self).__init__(callbacks)
self._callback_execute('set_proto', self)
self.MAX_LENGTH = 1024*1024*20
def connectionMade(self):
self._callback_execute('connected', self)
def lengthLimitExceeded(self, length):
        _log.error("Received string too long, package was dumped: length was %s and max length is %s", length, self.MAX_LENGTH)
def connectionLost(self, reason):
self._callback_execute('disconnected', reason)
# TODO: Remove all callbacks
def stringReceived(self, data):
"As soon as any data is received, send it to callback"
self._callback_execute('data', data)
class TCPServerFactory(protocol.ServerFactory):
protocol = StringProtocol
def __init__(self, callbacks):
# For the protocol
self._callbacks = callbacks
def buildProtocol(self, addr):
proto = self.protocol(self._callbacks)
return proto
# Client
class TwistedCalvinTransport(base_transport.CalvinTransportBase):
def __init__(self, host, port, callbacks=None, proto=None, node_name=None, server_node_name=None, *args, **kwargs):
super(TwistedCalvinTransport, self).__init__(host, port, callbacks=callbacks)
self._host_ip = host
self._host_port = port
self._proto = proto
self._factory = None
self._node_name = node_name
self._server_node_name=server_node_name
self._runtime_credentials = None
        # If the server created us we already have a proto
if proto:
proto.callback_register('connected', CalvinCB(self._connected))
proto.callback_register('disconnected', CalvinCB(self._disconnected))
proto.callback_register('data', CalvinCB(self._data))
self._callbacks = callbacks
#If TLS is chosen, ensure that a node_name and a server_node_name are set
runtime_to_runtime_security = _conf.get("security","runtime_to_runtime_security")
if (runtime_to_runtime_security=="tls"):
if self._node_name==None or self._server_node_name==None:
_log.error("For TLS, both node_name and server_node_name must be given as input"
"\n\tself._node_name={}"
"\n\tself._server_node_name={}".format(self._node_name, self._server_node_name))
raise Exception("For TLS, both node_name and server_node_name must be given as input")
def is_connected(self):
return self._proto is not None
def disconnect(self):
if self._proto:
self._proto.transport.loseConnection()
def send(self, data):
if self._proto:
self._proto.sendString(data)
def join(self):
from twisted.internet._sslverify import OpenSSLCertificateAuthorities
from OpenSSL import crypto
if self._proto:
raise Exception("Already connected")
# Own callbacks
callbacks = {'connected': [CalvinCB(self._connected)],
'disconnected': [CalvinCB(self._disconnected)],
'connection_failed': [CalvinCB(self._connection_failed)],
'data': [CalvinCB(self._data)],
'set_proto': [CalvinCB(self._set_proto)]}
self._factory = TCPClientFactory(callbacks) # addr="%s:%s" % (self._host_ip, self._host_port))
runtime_to_runtime_security = _conf.get("security","runtime_to_runtime_security")
if runtime_to_runtime_security=="tls":
_log.debug("TwistedCalvinTransport with TLS chosen")
trusted_ca_certs = []
try:
self._runtime_credentials = runtime_credentials.RuntimeCredentials(self._node_name)
ca_cert_list_str = certificate.get_truststore_as_list_of_strings(certificate.TRUSTSTORE_TRANSPORT)
for ca_cert in ca_cert_list_str:
trusted_ca_certs.append(crypto.load_certificate(crypto.FILETYPE_PEM, ca_cert))
ca_certs = OpenSSLCertificateAuthorities(trusted_ca_certs)
                client_credentials_data = self._runtime_credentials.get_credentials()
client_credentials = ssl.PrivateCertificate.loadPEM(client_credentials_data)
except Exception as err:
_log.error("TwistedCalvinTransport: Failed to load client credentials, err={}".format(err))
raise
try:
options = ssl.optionsForClientTLS(self._server_node_name,
trustRoot=ca_certs,
clientCertificate=client_credentials)
except Exception as err:
_log.error("TwistedCalvinTransport: Failed to create optionsForClientTLS "
"\n\terr={}"
"\n\tself._server_node_name={}".format(err,
self._server_node_name))
raise
try:
endpoint = endpoints.SSL4ClientEndpoint(reactor,
self._host_ip,
int(self._host_port),
options)
except:
_log.error("TwistedCalvinTransport: Client failed connectSSL")
raise
try:
endpoint.connect(self._factory)
except Exception as e:
_log.error("TwistedCalvinTransport: Failed endpoint.connect, e={}".format(e))
raise
else:
reactor.connectTCP(self._host_ip, int(self._host_port), self._factory)
def _set_proto(self, proto):
_log.debug("%s, %s, %s" % (self, '_set_proto', proto))
if self._proto:
_log.error("_set_proto: Already connected")
return
self._proto = proto
def _connected(self, proto):
_log.debug("%s, %s" % (self, 'connected'))
self._callback_execute('connected')
def _disconnected(self, reason):
_log.debug("%s, %s, %s" % (self, 'disconnected', reason))
self._callback_execute('disconnected', reason)
def _connection_failed(self, addr, reason):
_log.debug("%s, %s, %s" % (self, 'connection_failed', reason))
self._callback_execute('connection_failed', reason)
def _data(self, data):
_log.debug("%s, %s, %s" % (self, '_data', data))
self._callback_execute('data', data)
class TCPClientFactory(protocol.ClientFactory, CalvinCBClass):
protocol = StringProtocol
def __init__(self, callbacks):
# For the protocol
self._callbacks = callbacks
super(TCPClientFactory, self).__init__(callbacks)
def clientConnectionFailed(self, connector, reason):
_log.info('Connection failed. reason: %s, dest %s', reason, connector.getDestination())
addr = (connector.getDestination().host, connector.getDestination().port)
self._callback_execute('connection_failed', addr, reason)
def startedConnecting(self, connector):
pass
def buildProtocol(self, addr):
proto = self.protocol(self._callbacks)
return proto
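# --- Illustrative sketch, not part of the original module -------------------
# Shows the callback-dictionary shape used throughout the classes above: each
# event name maps to a list of CalvinCB wrappers. The handler below is a stub
# and the exact set of events a real runtime registers may differ.
def _example_server_callbacks():
    def on_started(port):
        _log.info("example server listening on port %s", port)
    return {'server_started': [CalvinCB(on_started)],
            'server_stopped': [],
            'client_connected': []}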
| 42.625
| 141
| 0.636866
|
40563d7bedda12ca9fcd079cc50b80debe4c139c
| 395
|
py
|
Python
|
python/py-collections/collections-counter.py
|
feliposz/hackerrank-solutions
|
fb1d63ca12a0d289362c9b3fb4cb0b79ef73f72f
|
[
"MIT"
] | null | null | null |
python/py-collections/collections-counter.py
|
feliposz/hackerrank-solutions
|
fb1d63ca12a0d289362c9b3fb4cb0b79ef73f72f
|
[
"MIT"
] | null | null | null |
python/py-collections/collections-counter.py
|
feliposz/hackerrank-solutions
|
fb1d63ca12a0d289362c9b3fb4cb0b79ef73f72f
|
[
"MIT"
] | null | null | null |
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import Counter
_ = raw_input()
shoes_list = map(int, raw_input().split(' '))
shoes = Counter(shoes_list)
total = 0
for _ in range(input()):
data = raw_input().split(' ')
size = int(data[0])
value = int(data[1])
if shoes[size] > 0:
total += value
shoes[size] -= 1
print total
| 24.6875
| 69
| 0.635443
|
32ad59bd5e8e77e560af306e6ede7c158611f029
| 1,001
|
py
|
Python
|
workspaces/migrations/0013_workspacewhiteboardapp.py
|
Exaphis/Synchronous
|
17334442ee3d54d6704469bba8a53eb35c1f555e
|
[
"MIT"
] | null | null | null |
workspaces/migrations/0013_workspacewhiteboardapp.py
|
Exaphis/Synchronous
|
17334442ee3d54d6704469bba8a53eb35c1f555e
|
[
"MIT"
] | null | null | null |
workspaces/migrations/0013_workspacewhiteboardapp.py
|
Exaphis/Synchronous
|
17334442ee3d54d6704469bba8a53eb35c1f555e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-20 05:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("workspaces", "0012_auto_20210402_1740"),
]
operations = [
migrations.CreateModel(
name="WorkspaceWhiteboardApp",
fields=[
(
"workspaceapp_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="workspaces.workspaceapp",
),
),
("space_id", models.CharField(max_length=255)),
("edit_hash", models.CharField(max_length=255)),
],
bases=("workspaces.workspaceapp",),
),
]
| 29.441176
| 68
| 0.493506
|
45457c4564eb18062523c243da59230406f73236
| 6,558
|
py
|
Python
|
mlp_class.py
|
uoguelph-mlrg/cnn-moth-detection
|
dbc2b2ef04e341ba8657f4cdb52f2c2108fa4aa3
|
[
"ECL-2.0"
] | 2
|
2019-07-24T06:11:18.000Z
|
2021-09-13T11:36:41.000Z
|
mlp_class.py
|
uoguelph-mlrg/cnn-moth-detection
|
dbc2b2ef04e341ba8657f4cdb52f2c2108fa4aa3
|
[
"ECL-2.0"
] | null | null | null |
mlp_class.py
|
uoguelph-mlrg/cnn-moth-detection
|
dbc2b2ef04e341ba8657f4cdb52f2c2108fa4aa3
|
[
"ECL-2.0"
] | null | null | null |
import numpy
import theano
import theano.tensor as T
from tools_theano import batch_to_anysize, cost_batch_to_any_size
from fileop import load_data
from layers import HiddenLayer, LogisticRegression, relu
from fit import fit
import logging
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
    that has one or more layers of hidden units with nonlinear activations.
    Intermediate layers usually use tanh or the sigmoid function as their
    activation (defined here by a ``HiddenLayer`` class), while the
    top layer is a softmax layer (defined here by a ``LogisticRegression``
    class).
"""
def __init__(self, rng=numpy.random.RandomState(1234),
n_in=784, n_hidden=500, n_out=10,
learning_rate=0.1, n_epochs=200,
batch_size=500, shuffle=True,
alpha_l1=0, alpha_l2=0,
logreg_activation=T.nnet.softmax,
hidden_activation=relu,):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
#####################
# assign parameters #
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.batch_size = batch_size
self.n_hidden = n_hidden
self.n_in = n_in
self.n_out = n_out
self.shuffle = shuffle
self.alpha_l1 = alpha_l1
self.alpha_l2 = alpha_l2
self.rng = rng
self.hidden_activation = hidden_activation
self.logreg_activation = logreg_activation
# assign parameters #
#####################
self.build_model()
# end def __init__
def build_model(self, flag_preserve_params=False):
logging.info('... building the model')
# allocate symbolic variables for the data
self.index = T.lscalar() # index to a [mini]batch
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
        # Since we are dealing with a one-hidden-layer MLP, this translates
        # into a HiddenLayer (relu activation by default here) connected to
        # the LogisticRegression layer; the activation function can be
        # replaced by tanh, sigmoid or any other nonlinearity
self.hiddenLayer = HiddenLayer(rng=self.rng, input=self.x,
n_in=self.n_in, n_out=self.n_hidden,
activation=self.hidden_activation)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=self.n_hidden,
n_out=self.n_out,
activation=self.logreg_activation)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = abs(self.hiddenLayer.W).sum() \
+ abs(self.logRegressionLayer.W).sum()
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \
+ (self.logRegressionLayer.W ** 2).sum()
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
        # the parameters of the model are the parameters of the two layers it
        # is made of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
self.cost = self.negative_log_likelihood(self.y) \
+ self.alpha_l1 * self.L1 \
+ self.alpha_l2 * self.L2_sqr
self.grads = T.grad(self.cost, self.params)
# fixed batch size based prediction
self.predict_proba_batch = theano.function([self.x],
self.logRegressionLayer.p_y_given_x)
self.predict_batch = theano.function([self.x],
T.argmax(self.logRegressionLayer.p_y_given_x, axis=1))
self.predict_cost_batch = theano.function([self.x, self.y], self.cost, allow_input_downcast=True)
def predict_cost(self, X, y):
return cost_batch_to_any_size(self.batch_size, self.predict_cost_batch,
X, y)
# end def predict_cost
def predict_proba(self, X):
return batch_to_anysize(self.batch_size, self.predict_proba_batch, X)
# end def predict_proba
def predict(self, X):
return batch_to_anysize(self.batch_size, self.predict_batch, X)
# end def predict
if __name__ == '__main__':
from misc import set_quick_logging
set_quick_logging()
datasets = load_data(data_name='mnist')
clf = MLP(n_epochs=10, batch_size=200)
fit(clf,
train_set=datasets[0],
valid_set=datasets[1],
test_set=datasets[2],
flag_report_test=True,
flag_report_valid=True,
early_stop=True)
    print(clf.predict_proba_batch(datasets[1][0][0:200]))
    print(clf.predict_batch(datasets[1][0][0:200]))
    print(clf.predict_proba(datasets[1][0]))
    print(clf.predict(datasets[1][0]))
    print(clf.predict_cost_batch(datasets[1][0][0:200], datasets[1][1][0:200]))
    print(clf.predict_cost(datasets[1][0][0:200], datasets[1][1][0:200]))
    print(clf.predict_cost(datasets[1][0], datasets[1][1]))
| 37.474286
| 105
| 0.620159
|
9c40b22f138f81c3026b865712d5a21f30870e42
| 180
|
py
|
Python
|
src/elements/properties/point.py
|
s-graveyard/PencilUi
|
f75bac419fb161edd28f225f4b35bced38e3ac8c
|
[
"Unlicense"
] | 1
|
2018-02-14T17:02:37.000Z
|
2018-02-14T17:02:37.000Z
|
src/elements/properties/point.py
|
SanjayGubaju/PencilUi
|
f75bac419fb161edd28f225f4b35bced38e3ac8c
|
[
"Unlicense"
] | null | null | null |
src/elements/properties/point.py
|
SanjayGubaju/PencilUi
|
f75bac419fb161edd28f225f4b35bced38e3ac8c
|
[
"Unlicense"
] | null | null | null |
class Point:
def __init__(self, new_x=0, new_y=0):
self.x = new_x
self.y = new_y
def set_point(self, point):
(self.x, self.y) = (point.x, point.y)
| 22.5
| 45
| 0.555556
|
a4f152299248531886efb645f7537c899db153ba
| 906
|
py
|
Python
|
ckanext/example_idatasetform/plugin_v6.py
|
mabah-mst/ckan
|
105f613272c2e31daa0081ead24c678bf1b55c22
|
[
"Apache-2.0"
] | 2,805
|
2015-01-02T18:13:15.000Z
|
2022-03-31T03:35:01.000Z
|
ckanext/example_idatasetform/plugin_v6.py
|
mabah-mst/ckan
|
105f613272c2e31daa0081ead24c678bf1b55c22
|
[
"Apache-2.0"
] | 3,801
|
2015-01-02T11:05:36.000Z
|
2022-03-31T19:24:37.000Z
|
ckanext/example_idatasetform/plugin_v6.py
|
cascaoSDC/ckan
|
75a08caa7c688ce70229dfea7070cc667a15c5e8
|
[
"BSD-3-Clause"
] | 1,689
|
2015-01-02T19:46:43.000Z
|
2022-03-28T14:59:43.000Z
|
# encoding: utf-8
import ckan.plugins as p
import ckan.plugins.toolkit as tk
def fancy_route(package_type):
return u'Hello, {}'.format(package_type)
def fancy_new_route(package_type):
return u'Hello, new {}'.format(package_type)
def fancy_resource_route(package_type, id):
return u'Hello, {}:{}'.format(package_type, id)
class ExampleIDatasetFormPlugin(p.SingletonPlugin, tk.DefaultDatasetForm):
p.implements(p.IDatasetForm)
def is_fallback(self):
return False
def package_types(self):
return [u'fancy_type']
def prepare_dataset_blueprint(self, package_type, bp):
bp.add_url_rule(u'/fancy-route', view_func=fancy_route)
bp.add_url_rule(u'/new', view_func=fancy_new_route)
return bp
def prepare_resource_blueprint(self, package_type, bp):
bp.add_url_rule(u'/new', view_func=fancy_resource_route)
return bp
| 25.166667
| 74
| 0.714128
|
672cc5936b4e5b0d40adbc00cd1ecbefca4dbc5c
| 1,513
|
py
|
Python
|
gpytorch/__init__.py
|
beyucel/gpytorch
|
a5394937495756945b831d83035349579d8fac31
|
[
"MIT"
] | 2
|
2019-04-19T00:35:49.000Z
|
2019-04-19T02:51:49.000Z
|
gpytorch/__init__.py
|
beyucel/gpytorch
|
a5394937495756945b831d83035349579d8fac31
|
[
"MIT"
] | null | null | null |
gpytorch/__init__.py
|
beyucel/gpytorch
|
a5394937495756945b831d83035349579d8fac31
|
[
"MIT"
] | 1
|
2019-04-19T00:42:35.000Z
|
2019-04-19T00:42:35.000Z
|
#!/usr/bin/env python3
from .module import Module
from . import (
beta_features,
distributions,
kernels,
lazy,
likelihoods,
means,
mlls,
models,
priors,
settings,
utils,
variational,
)
from .functions import (
add_diag,
add_jitter,
dsmm,
inv_matmul,
inv_quad,
inv_quad_logdet,
logdet,
log_normal_cdf,
matmul,
normal_cdf,
root_decomposition,
root_inv_decomposition,
# Deprecated
inv_quad_log_det,
log_det,
)
from .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood
from .lazy import lazify, delazify
__version__ = "0.2.1"
# Old deprecated stuff
fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
__all__ = [
# Submodules
"distributions",
"kernels",
"lazy",
"likelihoods",
"means",
"mlls",
"models",
"priors",
"utils",
"variational",
# Classes
"Module",
"ExactMarginalLogLikelihood",
"VariationalMarginalLogLikelihood",
# Functions
"add_diag",
"add_jitter",
"delazify",
"dsmm",
"inv_matmul",
"inv_quad",
"inv_quad_logdet",
"lazify",
"logdet",
"log_normal_cdf",
"matmul",
"normal_cdf",
"root_decomposition",
"root_inv_decomposition",
# Context managers
"beta_features",
"settings",
# Other
"__version__",
# Deprecated
"fast_pred_var",
"inv_quad_log_det",
"log_det",
]
| 18.011905
| 108
| 0.636484
|
a0a12ac73f402ed74571b8adb0d729d803a48645
| 13,847
|
py
|
Python
|
utils/torch_utils.py
|
akdenizz/yolov5_object_detection
|
e0e9ef458fa9bb9ef9036655bfd008b6249b7485
|
[
"MIT"
] | 5
|
2022-03-04T11:55:14.000Z
|
2022-03-08T07:54:00.000Z
|
utils/torch_utils.py
|
akdenizz/yolov5_object_detection
|
e0e9ef458fa9bb9ef9036655bfd008b6249b7485
|
[
"MIT"
] | null | null | null |
utils/torch_utils.py
|
akdenizz/yolov5_object_detection
|
e0e9ef458fa9bb9ef9036655bfd008b6249b7485
|
[
"MIT"
] | 3
|
2022-03-10T23:17:10.000Z
|
2022-03-19T19:29:51.000Z
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
PyTorch utils
"""
import datetime
import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from utils.general import LOGGER
try:
import thop # for FLOPs computation
except ImportError:
thop = None
# Suppress PyTorch warnings
warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling')
@contextmanager
def torch_distributed_zero_first(local_rank: int):
# Decorator to make all processes in distributed training wait for each local_master to do something
if local_rank not in [-1, 0]:
dist.barrier(device_ids=[local_rank])
yield
if local_rank == 0:
dist.barrier(device_ids=[0])
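# Hedged usage sketch (illustration only; `create_dataset` is a hypothetical stand-in
# for whatever one-time preparation the caller performs): rank 0 does the work while
# the remaining ranks wait at the barrier, then everyone proceeds with the cached result.
#
#   with torch_distributed_zero_first(local_rank):
#       dataset = create_dataset(path, img_size, batch_size)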
def date_modified(path=__file__):
# Return human-readable file modification date, i.e. '2021-3-26'
t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
return f'{t.year}-{t.month}-{t.day}'
def git_describe(path=Path(__file__).parent): # path must be a directory
# Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
s = f'git -C {path} describe --tags --long --always'
try:
return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
except subprocess.CalledProcessError:
return '' # not a git repository
def device_count():
# Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux.
assert platform.system() == 'Linux', 'device_count() function only works on Linux'
try:
cmd = 'nvidia-smi -L | wc -l'
return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1])
except Exception:
return 0
def select_device(device='', batch_size=0, newline=True):
# device = 'cpu' or '0' or '0,1,2,3'
s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'
cpu = device == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()
assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \
f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"
cuda = not cpu and torch.cuda.is_available()
if cuda:
devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7
n = len(devices) # device count
if n > 1 and batch_size > 0: # check batch_size is divisible by device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * (len(s) + 1)
for i, d in enumerate(devices):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB
else:
s += 'CPU\n'
if not newline:
s = s.rstrip()
LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
return torch.device('cuda:0' if cuda else 'cpu')
def time_sync():
# PyTorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def profile(input, ops, n=10, device=None):
# YOLOv5 speed/memory/FLOPs profiler
#
# Usage:
# input = torch.randn(16, 3, 640, 640)
# m1 = lambda x: x * torch.sigmoid(x)
# m2 = nn.SiLU()
# profile(input, [m1, m2], n=100) # profile over 100 iterations
results = []
device = device or select_device()
print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
f"{'input':>24s}{'output':>24s}")
for x in input if isinstance(input, list) else [input]:
x = x.to(device)
x.requires_grad = True
for m in ops if isinstance(ops, list) else [ops]:
m = m.to(device) if hasattr(m, 'to') else m # device
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs
except Exception:
flops = 0
try:
for _ in range(n):
t[0] = time_sync()
y = m(x)
t[1] = time_sync()
try:
_ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
t[2] = time_sync()
except Exception: # no backward method
# print(e) # for debug
t[2] = float('nan')
tf += (t[1] - t[0]) * 1000 / n # ms per op forward
tb += (t[2] - t[1]) * 1000 / n # ms per op backward
mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)
s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
results.append([p, flops, mem, tf, tb, s_in, s_out])
except Exception as e:
print(e)
results.append(None)
torch.cuda.empty_cache()
return results
def is_parallel(model):
# Returns True if model is of type DP or DDP
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def de_parallel(model):
# De-parallelize a model: returns single-GPU model if model is of type DP or DDP
return model.module if is_parallel(model) else model
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def sparsity(model):
# Return global model sparsity
a, b = 0, 0
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
# Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)
# Prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# Prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
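# Hedged sanity check (illustration only, not part of the original file): fusing a
# Conv2d/BatchNorm2d pair should leave the eval-mode forward pass numerically unchanged.
#
#   conv = nn.Conv2d(3, 16, 3, padding=1)
#   bn = nn.BatchNorm2d(16).eval()
#   x = torch.randn(1, 3, 32, 32)
#   fused = fuse_conv_and_bn(conv, bn)
#   assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)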
def model_info(model, verbose=False, img_size=640):
# Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPs
from thop import profile
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs
except (ImportError, Exception):
fs = ''
# LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
# Scales img(bs,3,y,x) by ratio constrained to gs-multiple
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)
class EarlyStopping:
# YOLOv5 simple early stopper
def __init__(self, patience=30):
self.best_fitness = 0.0 # i.e. mAP
self.best_epoch = 0
self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop
self.possible_stop = False # possible stop may occur next epoch
def __call__(self, epoch, fitness):
if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training
self.best_epoch = epoch
self.best_fitness = fitness
delta = epoch - self.best_epoch # epochs without improvement
self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch
stop = delta >= self.patience # stop training if patience exceeded
if stop:
LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. '
f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n'
f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '
f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.')
return stop
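# Hedged usage sketch (illustration only; `validate` is a hypothetical helper returning
# the scalar fitness the loop tracks, e.g. mAP):
#
#   stopper = EarlyStopping(patience=30)
#   for epoch in range(epochs):
#       fitness = validate(model)
#       if stopper(epoch, fitness):
#           break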
class ModelEMA:
""" Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models
Keeps a moving average of everything in the model state_dict (parameters and buffers)
For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
"""
def __init__(self, model, decay=0.9999, tau=2000, updates=0):
# Create EMA
self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)
msd = de_parallel(model).state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1 - d) * msd[k].detach()
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude)
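# Hedged usage sketch (illustration only; `loader`, `optimizer` and `compute_loss` are
# stand-ins for the caller's training code): update the EMA copy after every optimizer
# step and evaluate/export with `ema.ema` instead of the raw model.
#
#   ema = ModelEMA(model)
#   for imgs, targets in loader:
#       loss = compute_loss(model(imgs), targets)
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       ema.update(model)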
| 42.216463
| 120
| 0.606269
|
5f9bb93d2e597609550b82157c1f93513ea0c390
| 2,277
|
py
|
Python
|
lib/python2.7/site-packages/pip/_internal/utils/packaging.py
|
DPNT-Sourcecode/CHK-uimw01
|
87144ae10115d7a8df565f5109666f00bc001ce4
|
[
"Apache-2.0"
] | 937
|
2019-05-08T08:46:25.000Z
|
2022-03-31T12:56:07.000Z
|
virtual/lib/python3.6/site-packages/pip/_internal/utils/packaging.py
|
annstella/blog
|
1cdb7e7e7df028a84fae9b7d901116aae577589d
|
[
"MIT"
] | 374
|
2015-12-25T05:38:28.000Z
|
2022-03-03T05:03:36.000Z
|
lib/python2.7/site-packages/pip/_internal/utils/packaging.py
|
anish03/weather-dash
|
d517fa9da9028d1fc5d8fd71d77cee829ddee87b
|
[
"MIT"
] | 354
|
2019-05-10T02:15:26.000Z
|
2022-03-30T05:52:57.000Z
|
from __future__ import absolute_import
import logging
import sys
from email.parser import FeedParser # type: ignore
from pip._vendor import pkg_resources
from pip._vendor.packaging import specifiers, version
from pip._internal import exceptions
logger = logging.getLogger(__name__)
def check_requires_python(requires_python):
"""
    Check if the Python version in use matches the `requires_python` specifier.
    Returns `True` if the version of Python in use matches the requirement.
    Returns `False` if the version of Python in use does not match the
    requirement.
    Raises an InvalidSpecifier if `requires_python` has an invalid format.
"""
if requires_python is None:
# The package provides no information
return True
requires_python_specifier = specifiers.SpecifierSet(requires_python)
# We only use major.minor.micro
python_version = version.parse('.'.join(map(str, sys.version_info[:3])))
return python_version in requires_python_specifier
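# Hedged examples (illustration only; results shown assume a Python 3.6 interpreter):
#
#   check_requires_python(None)     -> True   (package declares no constraint)
#   check_requires_python('>=3.5')  -> True
#   check_requires_python('>=3.7')  -> False
#   check_requires_python('>=foo')  -> raises specifiers.InvalidSpecifier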
def get_metadata(dist):
if (isinstance(dist, pkg_resources.DistInfoDistribution) and
dist.has_metadata('METADATA')):
return dist.get_metadata('METADATA')
elif dist.has_metadata('PKG-INFO'):
return dist.get_metadata('PKG-INFO')
def check_dist_requires_python(dist):
metadata = get_metadata(dist)
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
requires_python = pkg_info_dict.get('Requires-Python')
try:
if not check_requires_python(requires_python):
raise exceptions.UnsupportedPythonVersion(
"%s requires Python '%s' but the running Python is %s" % (
dist.project_name,
requires_python,
'.'.join(map(str, sys.version_info[:3])),)
)
except specifiers.InvalidSpecifier as e:
logger.warning(
"Package %s has an invalid Requires-Python entry %s - %s",
dist.project_name, requires_python, e,
)
return
def get_installer(dist):
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
return line.strip()
return ''
| 32.070423
| 77
| 0.680281
|
0ab228dfb5199bc385232e53b7d579f620fbda5f
| 608
|
py
|
Python
|
booky/bookmgr/migrations/0001_initial.py
|
karmek-k/bookmgr
|
647463ecb05aae00c9d8026155829b68ef989220
|
[
"MIT"
] | 1
|
2019-07-03T12:37:16.000Z
|
2019-07-03T12:37:16.000Z
|
booky/bookmgr/migrations/0001_initial.py
|
karmek-k/bookmgr
|
647463ecb05aae00c9d8026155829b68ef989220
|
[
"MIT"
] | null | null | null |
booky/bookmgr/migrations/0001_initial.py
|
karmek-k/bookmgr
|
647463ecb05aae00c9d8026155829b68ef989220
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-26 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('description', models.TextField()),
('has_been_read', models.BooleanField(default=False)),
],
),
]
| 25.333333
| 114
| 0.569079
|
4e6b62a231a46fe722266edf92e3704ab697663e
| 27,238
|
py
|
Python
|
src/lib/utils/debugger.py
|
anhvth/RTM3D
|
d2843937de034556694d36fc5cde95e8afe6ae17
|
[
"MIT"
] | 393
|
2020-01-13T07:42:48.000Z
|
2022-03-22T07:15:25.000Z
|
src/lib/utils/debugger.py
|
anhvth/RTM3D
|
d2843937de034556694d36fc5cde95e8afe6ae17
|
[
"MIT"
] | 54
|
2020-02-10T14:52:50.000Z
|
2022-03-15T08:45:11.000Z
|
src/lib/utils/debugger.py
|
anhvth/RTM3D
|
d2843937de034556694d36fc5cde95e8afe6ae17
|
[
"MIT"
] | 73
|
2020-01-14T02:59:21.000Z
|
2022-03-30T04:16:57.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d
import os
import utils.vis_3d_utils as vis_utils
import math
class Debugger(object):
def __init__(self, ipynb=False, theme='black',
num_classes=-1, dataset=None, down_ratio=4):
self.ipynb = ipynb
if not self.ipynb:
import matplotlib.pyplot as plt
self.plt = plt
self.imgs = {}
self.BEV={}
self.theme = theme
colors = [(color_list[_]).astype(np.uint8) \
for _ in range(len(color_list))]
self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)
if self.theme == 'white':
self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3)
self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8)
self.dim_scale = 1
if dataset == 'coco_hp':
self.names = ['p']
self.num_class = 1
self.num_joints = 17
self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [4, 6], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[5, 11], [6, 12], [11, 12],
[11, 13], [13, 15], [12, 14], [14, 16]]
self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 255),
(255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 255),
(255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)]
self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255)]
elif dataset == 'kitti_hp':
self.names = kitti_class_name
self.num_class = 1
self.num_joints = 9
self.edges1 = [[0, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [4, 6], [5, 6],
[5, 7]]
self.edges = [[0, 1],[1,2],[2,3],[3,0],
[4, 5], [5, 6], [6, 7],[0, 4],[1,5],[2,6],[3,7],
]
self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 255),
(255, 0, 0), (255, 0, 0)]
self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0),(255, 100, 50)]
elif num_classes == 80 or dataset == 'coco':
self.names = coco_class_name
elif num_classes == 20 or dataset == 'pascal':
self.names = pascal_class_name
elif dataset == 'gta':
self.names = gta_class_name
self.focal_length = 935.3074360871937
self.W = 1920
self.H = 1080
self.dim_scale = 3
elif dataset == 'viper':
self.names = gta_class_name
self.focal_length = 1158
self.W = 1920
self.H = 1080
self.dim_scale = 3
elif num_classes == 3 or dataset == 'kitti':
self.names = kitti_class_name
self.focal_length = 721.5377
self.W = 1242
self.H = 375
elif num_classes == 3 or dataset == 'kitti_hp':
self.names = kitti_class_name
self.focal_length = 721.5377
self.W = 1242
self.H = 375
num_classes = len(self.names)
self.down_ratio=down_ratio
# for bird view
self.world_size = 64
self.out_size = 384
def add_img(self, img, img_id='default', revert_color=False):
if revert_color:
img = 255 - img
self.imgs[img_id] = img.copy()
im_bev = vis_utils.vis_create_bev(width=img.shape[0] * 2)
self.BEV[img_id] = im_bev
def add_bev(self, box,img_id, color=(255,0,0), is_faster = True):
position=box[36:39]
dim=box[32:35]
l = dim[2]
h = dim[0]
w = dim[1]
ori=box[35]
if is_faster:
score_3d = box[4] * (1 / (1 + math.exp(-box[39])))
else:
score_3d = (box[4] + (1 / (1 + math.exp(-box[39]))) + (sum(box[23:32]) / 9)) / 3
self.BEV[img_id] = vis_utils.vis_box_in_bev(self.BEV[img_id], position, [l,h,w], ori,
score=score_3d,
width=self.imgs[img_id].shape[0] * 2, gt=color)
def add_mask(self, mask, bg, imgId = 'default', trans = 0.8):
self.imgs[imgId] = (mask.reshape(
mask.shape[0], mask.shape[1], 1) * 255 * trans + \
bg * (1 - trans)).astype(np.uint8)
def show_img(self, pause = False, imgId = 'default'):
cv2.imshow('{}'.format(imgId), self.imgs[imgId])
if pause:
cv2.waitKey()
def add_blend_img(self, back, fore, img_id='blend', trans=0.7):
if self.theme == 'white':
fore = 255 - fore
if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]:
fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
if len(fore.shape) == 2:
fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
self.imgs[img_id] = (back * (1. - trans) + fore * trans)
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
'''
# slow version
def gen_colormap(self, img, output_res=None):
# num_classes = len(self.colors)
img[img < 0] = 0
h, w = img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
resized = resized.reshape(output_res[0], output_res[1], 1)
cl = self.colors[i] if not (self.theme == 'white') \
else 255 - self.colors[i]
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
return color_map
'''
def gen_colormap(self, img, output_res=None):
img = img.copy()
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
'''
# slow
def gen_colormap_hp(self, img, output_res=None):
# num_classes = len(self.colors)
# img[img < 0] = 0
h, w = img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
resized = resized.reshape(output_res[0], output_res[1], 1)
cl = self.colors_hp[i] if not (self.theme == 'white') else \
(255 - np.array(self.colors_hp[i]))
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
return color_map
'''
def gen_colormap_hp(self, img, output_res=None):
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
def add_rect(self, rect1, rect2, c, conf=1, img_id='default'):
cv2.rectangle(
self.imgs[img_id], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2)
if conf < 1:
cv2.circle(self.imgs[img_id], (rect1[0], rect1[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect2[0], rect2[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect1[0], rect2[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect2[0], rect1[1]), int(10 * conf), c, 1)
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'):
bbox = np.array(bbox, dtype=np.int32)
# cat = (int(cat) + 1) % 80
cat = int(cat)
# print('cat', cat, self.names[cat])
c = self.colors[cat][0][0].tolist()
if self.theme == 'white':
c = (255 - np.array(c)).tolist()
txt = '{}{:.1f}'.format(self.names[cat], conf)
font = cv2.FONT_HERSHEY_SIMPLEX
cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
cv2.rectangle(
self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
if show_txt:
cv2.rectangle(self.imgs[img_id],
(bbox[0], bbox[1] - cat_size[1] - 2),
(bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2),
font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
def add_coco_hp(self, points, img_id='default'):
points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
for j in range(self.num_joints):
cv2.circle(self.imgs[img_id],
(points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1)
for j, e in enumerate(self.edges):
if points[e].min() > 0:
cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]),
(points[e[1], 0], points[e[1], 1]), self.ec[j], 2,
lineType=cv2.LINE_AA)
def add_kitti_hp(self, points, img_id='default'):
points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
for j in range(self.num_joints):
cv2.circle(self.imgs[img_id],
(points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1)
# for j, e in enumerate(self.edges):
# if points[e].min() > 0:
# cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]),
# (points[e[1], 0], points[e[1], 1]), (255,0,0), 1,
# lineType=cv2.LINE_AA)
def save_kitti_hp_point(self, points_dim, img_path,opt,img_id='default'):
# points=points_dim[:16]
# dim = points_dim[16:19]
# points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
result_dir=opt.exp_dir
file_number=img_path.split('.')[-2][-6:]
self.write_points_results(result_dir,file_number,points_dim)
def save_kitti_format(self, results, img_path,opt,img_id='default',is_faster=False):
# points=points_dim[:16]
# dim = points_dim[16:19]
# points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
result_dir=opt.results_dir
file_number=img_path.split('.')[-2][-6:]
box=results[:4]
if is_faster:
score=results[4]*(1/(1+math.exp(-results[39])))
else:
score=(results[4]+(1/(1+math.exp(-results[39])))+(sum(results[23:32])/9))/3
dim=results[32:35]
if dim[0] < 0 or dim[1]<0 or dim[2]<0:
print(file_number,dim)
pos=results[36:39]
ori=results[35]
cat=results[40]
det_cats = ['Car', 'Pedestrian', 'Cyclist']
self.write_detection_results(det_cats[int(cat)],result_dir,file_number,box,dim,pos,ori,score)
def write_detection_results(self,cls, result_dir, file_number, box,dim,pos,ori,score):
    '''Append detection results one at a time to KITTI-format label files.
    '''
if result_dir is None: return
result_dir = result_dir + '/data'
Px = pos[0]
Py = pos[1]
Pz = pos[2]
l =dim[2]
h = dim[0]
w = dim[1]
Py=Py+h/2
pi=np.pi
if ori > 2 * pi:
while ori > 2 * pi:
ori -= 2 * pi
if ori < -2 * pi:
while ori < -2 * pi:
ori += 2 * pi
if ori > pi:
ori = 2 * pi - ori
if ori < -pi:
      ori = 2 * pi + ori
alpha = ori - math.atan2(Px, Pz)
# convert the object from cam2 to the cam0 frame
output_str = cls + ' '
output_str += '%.2f %.d ' % (-1, -1)
output_str += '%.7f %.7f %.7f %.7f %.7f ' % (alpha, box[0], box[1], box[2], box[3])
output_str += '%.7f %.7f %.7f %.7f %.7f %.7f %.7f %.7f \n' % (h, w, l, Px, Py, \
Pz, ori, score)
# output_str += '%.2f %.2f %.2f %.2f %.2f ' % (alpha, box[0], box[1], box[2], box[3])
# output_str += '%.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % (h, w, l, Px, Py, \
# Pz, ori, score)
# Write TXT files
if not os.path.exists(result_dir):
os.makedirs(result_dir)
pred_filename = result_dir + '/' + file_number + '.txt'
with open(pred_filename, 'a') as det_file:
det_file.write(output_str)
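    # Hedged note (illustration only): each line written above follows the usual KITTI
    # label layout,
    #   type truncated occluded alpha x1 y1 x2 y2 h w l X Y Z rotation_y score
    # with truncated/occluded hard-coded to -1 by this writer.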
def write_points_results(self, result_dir, file_number,point_dim):
    '''Append raw keypoint/dimension outputs one at a time to per-image text files.
    '''
if result_dir is None: return
result_dir = result_dir + '/sslrtm3d/56'
# convert the object from cam2 to the cam0 frame
output_str=' '
for w in range(len(point_dim)):
output_str += '%.2f ' % (point_dim[w])
# for ra in range(8):
# output_str+='%.2f %.2f '%(point[ra,0],point[ra,1])
# output_str += '%.2f %.2f %.2f ' % (dim[0], dim[1],dim[2])
output_str += '\n'
# Write TXT files
if not os.path.exists(result_dir):
os.makedirs(result_dir)
pred_filename = result_dir + '/' + file_number + '.txt'
with open(pred_filename, 'a') as det_file:
det_file.write(output_str)
def add_points(self, points, img_id='default'):
num_classes = len(points)
# assert num_classes == len(self.colors)
for i in range(num_classes):
for j in range(len(points[i])):
c = self.colors[i, 0, 0]
cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
points[i][j][1] * self.down_ratio),
5, (255, 255, 255), -1)
cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
points[i][j][1] * self.down_ratio),
3, (int(c[0]), int(c[1]), int(c[2])), -1)
def show_all_imgs(self, pause=False, time=0):
if not self.ipynb:
for i, v in self.imgs.items():
cv2.imshow('{}'.format(i), v)
for i, v in self.BEV.items():
cv2.imshow('{}BEV'.format(i), v)
if cv2.waitKey(0 if pause else 1) == 27:
import sys
sys.exit(0)
else:
self.ax = None
nImgs = len(self.imgs)
fig=self.plt.figure(figsize=(nImgs * 10,10))
nCols = nImgs
nRows = nImgs // nCols
for i, (k, v) in enumerate(self.imgs.items()):
fig.add_subplot(1, nImgs, i + 1)
if len(v.shape) == 3:
self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))
else:
self.plt.imshow(v)
self.plt.show()
def save_img(self, imgId='default', path='./cache/debug/'):
cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])
def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False):
if genID:
try:
idx = int(np.loadtxt(path + '/id.txt'))
      except Exception:
idx = 0
prefix=idx
np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d')
for i, v in self.imgs.items():
cv2.imwrite(path + '/{}{}.png'.format(prefix, i), v)
def remove_side(self, img_id, img):
if not (img_id in self.imgs):
return
ws = img.sum(axis=2).sum(axis=0)
l = 0
    while l < len(ws) and ws[l] == 0:
l+= 1
r = ws.shape[0] - 1
while ws[r] == 0 and r > 0:
r -= 1
hs = img.sum(axis=2).sum(axis=1)
t = 0
    while t < len(hs) and hs[t] == 0:
t += 1
b = hs.shape[0] - 1
while hs[b] == 0 and b > 0:
b -= 1
self.imgs[img_id] = self.imgs[img_id][t:b+1, l:r+1].copy()
def project_3d_to_bird(self, pt):
pt[0] += self.world_size / 2
pt[1] = self.world_size - pt[1]
pt = pt * self.out_size / self.world_size
return pt.astype(np.int32)
def add_ct_detection(
self, img, dets, show_box=False, show_txt=True,
center_thresh=0.5, img_id='det'):
# dets: max_preds x 5
self.imgs[img_id] = img.copy()
if type(dets) == type({}):
for cat in dets:
for i in range(len(dets[cat])):
if dets[cat][i, 2] > center_thresh:
cl = (self.colors[cat, 0, 0]).tolist()
ct = dets[cat][i, :2].astype(np.int32)
if show_box:
w, h = dets[cat][i, -2], dets[cat][i, -1]
x, y = dets[cat][i, 0], dets[cat][i, 1]
bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
dtype=np.float32)
self.add_coco_bbox(
bbox, cat - 1, dets[cat][i, 2],
show_txt=show_txt, img_id=img_id)
else:
for i in range(len(dets)):
if dets[i, 2] > center_thresh:
# print('dets', dets[i])
cat = int(dets[i, -1])
cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \
255 - self.colors[cat, 0, 0]).tolist()
ct = dets[i, :2].astype(np.int32) * self.down_ratio
cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1)
if show_box:
w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio
x, y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio
bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
dtype=np.float32)
self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id)
def add_3d_detection(self, results, calib, img_id='default', show_txt=False):
dim = results[32:35]
pos = results[36:39]
ori = results[35]
cat = int(results[40])
pos[1] = pos[1] + dim[0] / 2
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
# dim = dim / self.dim_scale
#cl = self.names[cat]
cl = (self.colors[cat, 0, 0]).tolist()
box_3d = compute_box_3d(dim, pos, ori)
box_2d = self.project_to_image(box_3d, calib)
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
def project_to_image(self, pts_3d, P):
# pts_3d: n x 3
# P: 3 x 4
# return: n x 2
pts_3d_homo = np.concatenate(
[pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1)
pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0)
pts_2d = pts_2d[:, :2] / pts_2d[:, 2:]
# import pdb; pdb.set_trace()
return pts_2d
def compose_vis_add(
self, img_path, dets, calib,
center_thresh, pred, bev, img_id='out'):
self.imgs[img_id] = cv2.imread(img_path)
# h, w = self.imgs[img_id].shape[:2]
# pred = cv2.resize(pred, (h, w))
h, w = pred.shape[:2]
hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
self.add_blend_img(self.imgs[img_id], pred, img_id)
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
# dim = dim / self.dim_scale
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
box_2d[:, 0] /= hs
box_2d[:, 1] /= ws
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
self.imgs[img_id] = np.concatenate(
[self.imgs[img_id], self.imgs[bev]], axis=1)
def add_2d_detection(
self, img, dets, show_box=False, show_txt=True,
center_thresh=0.5, img_id='det'):
self.imgs[img_id] = img
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
bbox = dets[cat][i, 1:5]
self.add_coco_bbox(
bbox, cat - 1, dets[cat][i, -1],
show_txt=show_txt, img_id=img_id)
def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'):
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
for cat in dets:
cl = (self.colors[cat - 1, 0, 0]).tolist()
lc = (250, 152, 12)
for i in range(len(dets[cat])):
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
for k in range(4):
rect[k] = self.project_3d_to_bird(rect[k])
# cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1)
cv2.polylines(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
True,lc,2,lineType=cv2.LINE_AA)
for e in [[0, 1]]:
t = 4 if e == [0, 1] else 1
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
(rect[e[1]][0], rect[e[1]][1]), lc, t,
lineType=cv2.LINE_AA)
self.imgs[img_id] = bird_view
def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'):
alpha = 0.5
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
for ii, (dets, lc, cc) in enumerate(
[(dets_gt, (12, 49, 250), (0, 0, 255)),
(dets_dt, (250, 152, 12), (255, 0, 0))]):
# cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3)
for cat in dets:
cl = (self.colors[cat - 1, 0, 0]).tolist()
for i in range(len(dets[cat])):
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
for k in range(4):
rect[k] = self.project_3d_to_bird(rect[k])
if ii == 0:
cv2.fillPoly(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
lc,lineType=cv2.LINE_AA)
else:
cv2.polylines(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
True,lc,2,lineType=cv2.LINE_AA)
# for e in [[0, 1], [1, 2], [2, 3], [3, 0]]:
for e in [[0, 1]]:
t = 4 if e == [0, 1] else 1
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
(rect[e[1]][0], rect[e[1]][1]), lc, t,
lineType=cv2.LINE_AA)
self.imgs[img_id] = bird_view
kitti_class_name = [
'Car', 'Pedestrian', 'Cyclist'
]
gta_class_name = [
'p', 'v'
]
pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
"car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
coco_class_name = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
color_list = np.array(
[
1.000, 1.000, 1.000,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
0.000, 0.447, 0.741,
0.50, 0.5, 0
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
| 39.135057
| 97
| 0.519311
|
2651376e2d81de35ad9c2d42420d0afa8be407aa
| 1,661
|
py
|
Python
|
search_engine/controller.py
|
cuong1181998/IR-IT4853
|
6b18832e99a76764ebbc1e52dc5efb54f5d95879
|
[
"MIT"
] | 2
|
2020-04-24T08:41:08.000Z
|
2021-07-21T04:19:44.000Z
|
search_engine/controller.py
|
cuong1181998/IR-IT4853
|
6b18832e99a76764ebbc1e52dc5efb54f5d95879
|
[
"MIT"
] | null | null | null |
search_engine/controller.py
|
cuong1181998/IR-IT4853
|
6b18832e99a76764ebbc1e52dc5efb54f5d95879
|
[
"MIT"
] | null | null | null |
import pysolr
import json
from pyvi import ViTokenizer
import utils
SOLR_SERVER = 'http://localhost:8983/solr/IT4853'
def connect_solr():
try:
solr = pysolr.Solr(SOLR_SERVER, always_commit=True)
solr.ping() # check solr alive
print("Connection success!!")
return solr
except Exception:
print("[ERROR] Connect_error: Something went wrong!")
return
def search (query, page=1):
try:
solr = connect_solr()
list_words = ViTokenizer.tokenize(query).split()
stopwords = utils.get_stopwords()
        words = []  # words remaining after stop-word removal
for word in list_words:
if word not in stopwords:
words.append(word)
if len(words) == 0:
return { "results": [], "numFound": 0 }
else:
clean_query = ' '.join(words)
page = int(page)
results = solr.search("content_clean:{}".format(clean_query), **{'fl': '*, score', 'start': "{}".format((page - 1)*10)})
return { "results": results, "numFound": results.raw_response['response']['numFound']}
except Exception:
print("[ERROR] search error: Something went wrong!")
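# Hedged usage sketch (illustration only; the 'id' field name is an assumption about the
# Solr schema, while 'score' is requested explicitly via the 'fl' parameter above):
#
#   res = search("tin hoc", page=1)
#   print(res["numFound"])
#   for doc in res["results"]:
#       print(doc.get("id"), doc.get("score"))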
def search_synonym (query):
try:
solr = connect_solr()
list_words = ViTokenizer.tokenize(query).split()
stopwords = utils.get_stopwords()
        words = []  # words remaining after stop-word removal
for word in list_words:
if word not in stopwords:
words.append(word)
except Exception:
        print("[ERROR] search synonym error: Something went wrong!")
search("", 1)
| 30.2
| 132
| 0.584588
|
a84da7cf5f6cc7a931b96a60108034b77bc84cdb
| 820
|
py
|
Python
|
setup.py
|
realiti4/pip-upgrade
|
578885d6206159b939cfc38d5a2f95d3d4afb193
|
[
"MIT"
] | 17
|
2020-10-14T12:25:59.000Z
|
2021-12-14T14:53:05.000Z
|
setup.py
|
realiti4/pip-upgrade
|
578885d6206159b939cfc38d5a2f95d3d4afb193
|
[
"MIT"
] | 5
|
2020-08-24T21:14:24.000Z
|
2021-12-05T20:33:58.000Z
|
setup.py
|
realiti4/pip-upgrade
|
578885d6206159b939cfc38d5a2f95d3d4afb193
|
[
"MIT"
] | 4
|
2020-12-01T12:16:42.000Z
|
2022-02-08T18:43:23.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pip-upgrade-tool",
version="0.5.2",
author="Onur Cetinkol",
author_email="realiti44@gmail.com",
description="An easy tool for upgrading all of your packages while not breaking dependencies",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/realiti4/pip-upgrade",
entry_points = {
'console_scripts': ['pip-upgrade = pip_upgrade:main'],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
packages=["pip_upgrade"],
install_requires=["packaging"],
)
| 30.37037
| 98
| 0.659756
|
980baba3281581f32eec9b807d505667c9141cab
| 6,672
|
py
|
Python
|
tests/background_tasks_test.py
|
robfrut135/PerfKitBenchmarker
|
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
|
[
"Apache-2.0"
] | null | null | null |
tests/background_tasks_test.py
|
robfrut135/PerfKitBenchmarker
|
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
|
[
"Apache-2.0"
] | null | null | null |
tests/background_tasks_test.py
|
robfrut135/PerfKitBenchmarker
|
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.background_tasks."""
import functools
import multiprocessing
import multiprocessing.managers
import os
import signal
import threading
import unittest
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import errors
from tests import pkb_common_test_case
def _ReturnArgs(a, b=None):
return b, a
def _RaiseValueError():
raise ValueError('ValueError')
def _IncrementCounter(lock, counter):
with lock:
counter.value += 1
def _AppendLength(int_list):
int_list.append(len(int_list))
def _WaitAndAppendInt(int_list, int_to_append, event=None, timeout=None):
if event:
event.wait(timeout)
int_list.append(int_to_append)
class GetCallStringTestCase(pkb_common_test_case.PkbCommonTestCase):
def testNoArgs(self):
result = background_tasks._GetCallString((_ReturnArgs, (), {}))
self.assertEqual(result, '_ReturnArgs()')
def testArgs(self):
result = background_tasks._GetCallString((_ReturnArgs, ('blue', 5), {}))
self.assertEqual(result, '_ReturnArgs(blue, 5)')
def testKwargs(self):
result = background_tasks._GetCallString((_ReturnArgs, (), {'x': 8}))
self.assertEqual(result, '_ReturnArgs(x=8)')
def testArgsAndKwargs(self):
result = background_tasks._GetCallString((_ReturnArgs, ('blue', 5),
{'x': 8}))
self.assertEqual(result, '_ReturnArgs(blue, 5, x=8)')
def testSinglePartial(self):
_ReturnArgs2 = functools.partial(_ReturnArgs, 1, x=2)
result = background_tasks._GetCallString((_ReturnArgs2, (), {}))
self.assertEqual(result, '_ReturnArgs(1, x=2)')
result = background_tasks._GetCallString((_ReturnArgs2, ('blue', 5),
{'x': 8}))
self.assertEqual(result, '_ReturnArgs(1, blue, 5, x=8)')
def testDoublePartial(self):
_ReturnArgs2 = functools.partial(_ReturnArgs, 1, x=2)
_ReturnArgs3 = functools.partial(_ReturnArgs2, 3, x=4)
result = background_tasks._GetCallString((_ReturnArgs3, (), {}))
self.assertEqual(result, '_ReturnArgs(1, 3, x=4)')
result = background_tasks._GetCallString((_ReturnArgs3, ('blue', 5),
{'x': 8}))
self.assertEqual(result, '_ReturnArgs(1, 3, blue, 5, x=8)')
class RunParallelThreadsTestCase(pkb_common_test_case.PkbCommonTestCase):
def testFewerThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(2)]
result = background_tasks.RunParallelThreads(calls, max_concurrency=4)
self.assertEqual(result, [(0, 'a'), (1, 'a')])
def testMoreThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(10)]
result = background_tasks.RunParallelThreads(calls, max_concurrency=4)
self.assertEqual(result, [(i, 'a') for i in range(10)])
def testException(self):
int_list = []
calls = [(_AppendLength, (int_list,), {}), (_RaiseValueError, (), {}),
(_AppendLength, (int_list,), {})]
with self.assertRaises(errors.VmUtil.ThreadException):
background_tasks.RunParallelThreads(calls, max_concurrency=1)
self.assertEqual(int_list, [0, 1])
def testInterrupt(self):
# Uses RunParallelThreads to try to run four threads:
# 0: Waits 5 seconds and adds 0 to int_list.
# 1: Adds 1 to int_list.
# 2: Sends a SIGINT to the current process.
# 3: Waits 5 seconds and adds 3 to int_list.
# Since the max_concurrency is set to 2, what should happen is that thread 0
# waits, thread 1 succeeds, thread 2 sends the SIGINT, and then neither
    # thread 0 nor thread 3 is able to append to int_list.
int_list = []
event = threading.Event()
calls = [(_WaitAndAppendInt, (int_list, 0, event, 5), {}),
(_WaitAndAppendInt, (int_list, 1), {}),
(os.kill, (os.getpid(), signal.SIGINT), {}),
(_WaitAndAppendInt, (int_list, 3, event, 5), {})]
with self.assertRaises(KeyboardInterrupt):
background_tasks.RunParallelThreads(calls, max_concurrency=2)
self.assertEqual(int_list, [1])
class RunThreadedTestCase(pkb_common_test_case.PkbCommonTestCase):
def testNonListParams(self):
with self.assertRaises(ValueError):
background_tasks.RunThreaded(_ReturnArgs, 'blue')
def testNoParams(self):
result = background_tasks.RunThreaded(_ReturnArgs, [])
self.assertEqual(result, [])
def testInvalidTupleParams(self):
with self.assertRaises(ValueError):
background_tasks.RunThreaded(_ReturnArgs, [('blue', 'red')])
def testSimpleListParams(self):
result = background_tasks.RunThreaded(_ReturnArgs, ['blue', 'red'])
self.assertEqual(result, [(None, 'blue'), (None, 'red')])
def testListOfTupleParams(self):
result = background_tasks.RunThreaded(
_ReturnArgs, [(('red',), {}), (('green',), {'b': 'blue'})])
self.assertEqual(result, [(None, 'red'), ('blue', 'green')])
class RunParallelProcessesTestCase(pkb_common_test_case.PkbCommonTestCase):
def testFewerThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(2)]
result = background_tasks.RunParallelProcesses(calls, max_concurrency=4)
self.assertEqual(result, [(0, 'a'), (1, 'a')])
def testMoreThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(10)]
result = background_tasks.RunParallelProcesses(calls, max_concurrency=4)
self.assertEqual(result, [(i, 'a') for i in range(10)])
def testException(self):
manager = multiprocessing.managers.SyncManager()
manager.start()
lock = manager.Lock()
counter = manager.Value('i', 0)
calls = [(_IncrementCounter, (lock, counter), {}),
(_RaiseValueError, (), {}),
(_IncrementCounter, (lock, counter), {})]
with self.assertRaises(errors.VmUtil.CalledProcessException):
background_tasks.RunParallelProcesses(calls, max_concurrency=1)
self.assertEqual(counter.value, 2)
if __name__ == '__main__':
unittest.main()
| 36.861878
| 80
| 0.68705
|
0fa4cb0d66743749e629dd92397d7d889379fc6f
| 7,017
|
py
|
Python
|
custom_components/pfsense_fauxapi/switch.py
|
jabastien/pfSense-FauxAPI-ha
|
1c5be9787b51723e5aa2f86acc511bd6b6196cfc
|
[
"MIT"
] | 15
|
2020-11-25T22:17:20.000Z
|
2021-09-20T18:59:43.000Z
|
custom_components/pfsense_fauxapi/switch.py
|
jabastien/pfSense-FauxAPI-ha
|
1c5be9787b51723e5aa2f86acc511bd6b6196cfc
|
[
"MIT"
] | 11
|
2020-12-04T01:02:33.000Z
|
2021-10-19T14:14:59.000Z
|
custom_components/pfsense_fauxapi/switch.py
|
jabastien/pfSense-FauxAPI-ha
|
1c5be9787b51723e5aa2f86acc511bd6b6196cfc
|
[
"MIT"
] | 3
|
2020-11-25T19:33:13.000Z
|
2021-08-25T00:58:23.000Z
|
"""
Switch Platform support for pfSense firewall rules.
For more details please refer to
https://github.com/dgshue/home-assistant-custom-components
Example usage:
configuration.yaml
---------------------------------------
switch:
- platform: pfsense_fauxapi
host: 192.168.1.1
port: 443
api_key: PFFA1QDKsadfsde2ffd
access_token: fsdfDSFewffsdfFwevsdfaFewwfffsEwrwesfdfCVvsdfwergsdfSDfwersdf
rule_filter: HomeAssistant
---------------------------------------
"""
import logging
import subprocess
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (
SwitchEntity, PLATFORM_SCHEMA, ENTITY_ID_FORMAT)
from homeassistant.const import (
CONF_FRIENDLY_NAME, CONF_SWITCHES, CONF_VALUE_TEMPLATE, CONF_HOST, CONF_PORT, CONF_API_KEY, CONF_ACCESS_TOKEN)
CONF_RULE_FILTER = 'rule_filter'
DOMAIN = "switch"
REQUIREMENTS = ['pfsense-fauxapi']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_RULE_FILTER): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Initialize the platform"""
"""Setup the pfSense Rules platform."""
import pprint, sys
from PfsenseFauxapi.PfsenseFauxapi import PfsenseFauxapi
# Assign configuration variables. The configuration check takes care they are
# present.
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
api_key = config.get(CONF_API_KEY)
access_token = config.get(CONF_ACCESS_TOKEN)
rule_prefix = config.get(CONF_RULE_FILTER)
fauxapi_host = '{}:{}'.format(host, port)
_LOGGER.debug("Connecting to pfSense firewall to collect rules to add as switches.")
try:
FauxapiLib = PfsenseFauxapi(fauxapi_host, api_key, access_token, debug=True)
# Get the current set of filters
filters = FauxapiLib.config_get('filter')
_LOGGER.debug("Found %s rules in pfSense", len(filters['rule']))
if rule_prefix:
_LOGGER.debug("Filter for rules starting with %s being applied", rule_prefix)
rules = []
# Iterate through and find rules
i = 0
for rule in filters['rule']:
if rule_prefix:
if (rule['descr'].startswith(rule_prefix)):
_LOGGER.debug("Found rule %s", rule['descr'])
new_rule = pfSense('pfsense_'+rule['descr'],rule['descr'],rule['tracker'], fauxapi_host, api_key, access_token)
rules.append(new_rule)
else:
_LOGGER.debug("Found rule %s", rule['descr'])
new_rule = pfSense('pfsense_'+rule['descr'],rule['descr'],rule['tracker'], fauxapi_host, api_key, access_token)
rules.append(new_rule)
i=i+1
# Add devices
add_entities(rules)
except Exception as e:
_LOGGER.error("Problem getting rule set from pfSense host: %s. Likely due to API key or secret. More Info:" + str(e), fauxapi_host)
class pfSense(SwitchEntity):
"""Representation of an pfSense Rule."""
def __init__(self, name, rule_name, tracker_id, fauxapi_host, api_key, access_token):
_LOGGER.info("Initialized pfSense Rule SWITCH %s", name)
"""Initialize an pfSense Rule as a switch."""
self._name = name
self._rule_name = rule_name
self._state = None
self._host = fauxapi_host
self._api_key = api_key
self._access_token = access_token
self._tracker_id = tracker_id
@property
def name(self):
return self._name
@property
def is_on(self):
return self._state
def turn_on(self, **kwargs):
self.set_rule_state(True)
def turn_off(self, **kwargs):
self.set_rule_state(False)
def update(self):
"""Check the current state of the rule in pfSense"""
import pprint, sys
from PfsenseFauxapi.PfsenseFauxapi import PfsenseFauxapi
_LOGGER.debug("Getting pfSense current rule state for %s", self._rule_name)
try:
# Setup connection with devices/cloud
FauxapiLib = PfsenseFauxapi(self._host, self._api_key, self._access_token, debug=True)
# Get the current set of filters
filters = FauxapiLib.config_get('filter')
for rule in filters['rule']:
if (rule['tracker'] == self._tracker_id):
_LOGGER.debug("Found rule with tracker %s, updating state.", self._tracker_id)
if ('disabled' in rule):
self._state = False
else:
self._state = True
except:
_LOGGER.error("Problem retrieving rule set from pfSense host: %s. Likely due to API key or secret.", self._host)
def set_rule_state(self, action):
"""Setup the pfSense Rules platform."""
import pprint, sys
from PfsenseFauxapi.PfsenseFauxapi import PfsenseFauxapi
_LOGGER.debug("Connecting to pfSense firewall to change rule states.")
try:
# Setup connection with devices/cloud
FauxapiLib = PfsenseFauxapi(self._host, self._api_key, self._access_token, debug=True)
# Get the current set of filters
filters = FauxapiLib.config_get('filter')
except:
_LOGGER.error("Problem retrieving rule set from pfSense host: %s. Likely due to API key or secret.", self._host)
i = 0
for rule in filters['rule']:
if (rule['tracker'] == self._tracker_id):
_LOGGER.info("Found rule changing state rule: %s", self._rule_name)
if (action == True):
if ('disabled' in rule):
del filters['rule'][i]['disabled']
_LOGGER.debug("Rule %s enabled in config (this has not been pushed back to firewall yet!)", self._rule_name)
elif (action == False):
filters['rule'][i]['disabled'] = ""
_LOGGER.debug("Rule %s disabled in config (this has not been pushed back to firewall yet!)", self._rule_name)
i=i+1
try:
_LOGGER.debug("Sending updated rule set to pfSense firewall")
# Push the config back to pfSense
filters = FauxapiLib.config_set(filters, 'filter')
_LOGGER.debug("Reloading the config on pfSense firewall to accept rule changes")
# Reload the config
FauxapiLib.send_event("filter reload")
except:
_LOGGER.error("Problem sending & reloading rule set from pfSense host: %s. Likely due to API key or secret.", self._host)
| 36.357513
| 140
| 0.633747
|
63e01113072e119179297dd78228fac733f503d7
| 3,707
|
py
|
Python
|
tests/test.py
|
BaharYilmaz/MAX-OCR
|
d5c644165867f0022b96905156cf51892a5ae21c
|
[
"Apache-2.0"
] | 36
|
2019-11-04T22:57:42.000Z
|
2022-03-03T07:08:56.000Z
|
tests/test.py
|
BaharYilmaz/MAX-OCR
|
d5c644165867f0022b96905156cf51892a5ae21c
|
[
"Apache-2.0"
] | 8
|
2019-11-05T12:28:46.000Z
|
2022-01-13T01:47:44.000Z
|
tests/test.py
|
BaharYilmaz/MAX-OCR
|
d5c644165867f0022b96905156cf51892a5ae21c
|
[
"Apache-2.0"
] | 26
|
2019-10-29T04:41:32.000Z
|
2022-03-24T13:21:02.000Z
|
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import requests
def test_swagger():
model_endpoint = 'http://localhost:5000/swagger.json'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
assert r.headers['Content-Type'] == 'application/json'
json = r.json()
assert 'swagger' in json
assert json.get('info') and json.get('info').get('title') == 'MAX OCR'
def test_metadata():
model_endpoint = 'http://localhost:5000/model/metadata'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
metadata = r.json()
assert metadata['id'] == 'max-ocr'
assert metadata['name'] == 'MAX OCR'
assert metadata['description'] == 'Identify text in an image.'
assert metadata['license'] == 'Apache v2'
assert metadata['type'] == 'Optical Character Recognition'
assert 'max-ocr' in metadata['source']
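# The /model/predict tests below upload an image as multipart form data; the
# JSON response contains a 'status' field plus a 'text' field holding a list of
# text blocks, each of which is a list of recognized lines.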
def test_predict():
model_endpoint = 'http://localhost:5000/model/predict'
file_path = 'samples/quick_start_watson_studio.jpg'
with open(file_path, 'rb') as file:
file_form = {'image': (file_path, file, 'image/jpg')}
r = requests.post(url=model_endpoint, files=file_form)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
assert len(response) == 2
assert len(response['text'][1]) == 6
assert response['text'][0][0] == 'Quick Start with Watson Studio'
assert response['text'][1][3] == 'but Watson Studio offers all of the frameworks and languages that'
def test_predict_with_numbers():
model_endpoint = 'http://localhost:5000/model/predict'
file_path = 'samples/text_with_numbers.png'
with open(file_path, 'rb') as file:
file_form = {'image': (file_path, file, 'image/png')}
r = requests.post(url=model_endpoint, files=file_form)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
assert len(response) == 2
assert len(response['text'][1]) == 1
assert '1531752157593' in response['text'][3][0]
def test_predict_jpeg():
model_endpoint = 'http://localhost:5000/model/predict'
file_path = 'samples/chap4_summary.jpg'
with open(file_path, 'rb') as file:
file_form = {'image': (file_path, file, 'image/jpg')}
r = requests.post(url=model_endpoint, files=file_form)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
assert len(response) == 2
assert len(response['text'][0]) == 5
assert response['text'][0][1] == 'Many of its core concepts, however, can be understood with simple'
def test_invalid():
model_endpoint = 'http://localhost:5000/model/predict'
file_path = 'tests/test.py'
with open(file_path, 'rb') as file:
file_form = {'image': (file_path, file, 'image/png')}
r = requests.post(url=model_endpoint, files=file_form)
assert r.status_code == 400
response = r.json()
assert response['status'] == 'error'
assert response['message'] == 'The provided input is not a valid image.'
if __name__ == '__main__':
pytest.main([__file__])
| 31.151261
| 104
| 0.670623
|
f0c6c817089bd2013e1f849729492758c3fdc3cd
| 2,092
|
py
|
Python
|
examples/ex09b_update_about.py
|
bstienen/phenoai
|
d7210aa2d0f89573724d0301e0dfeffb87c26646
|
[
"MIT"
] | 2
|
2018-08-31T01:18:14.000Z
|
2020-07-25T05:57:49.000Z
|
examples/ex09b_update_about.py
|
bstienen/phenoai
|
d7210aa2d0f89573724d0301e0dfeffb87c26646
|
[
"MIT"
] | null | null | null |
examples/ex09b_update_about.py
|
bstienen/phenoai
|
d7210aa2d0f89573724d0301e0dfeffb87c26646
|
[
"MIT"
] | 1
|
2020-07-25T05:57:38.000Z
|
2020-07-25T05:57:38.000Z
|
"""
Example 09b: Update About
=========================
Apart from being able to read and use AInalyses, PhenoAI also implements a module
to create new AInalyses from an existing estimator or to alter an existing
AInalysis. This module, the maker module, is used in this example to create
a classifier AInalysis based on an existing
AInalysis. This has the advantage that not all information has to be provided
again; elements like the estimator, configuration or data can simply be reused
in the new AInalysis. In this example everything from the original AInalysis is
reused and only the about page is updated in the new version.
The AInalysis that is used as a base in this example is the AInalysis created by
example 08a. In order for this script to run correctly, example 08a has to be
run first.
Example 09a and 09c show how to update the entire AInalysis, reusing only the
configuration, and how to update the AInalysis checksums respectively.
"""
# Check if example 08a was run
import os
if not os.path.exists("./my_first_ainalysis"):
raise Exception("The AInalysis 'my_first_ainalysis' as created by example 08a could not be found in the 'examples' folder. Run example 08a to create this AInalysis.")
# Load PhenoAI and make sure EVERYTHING is logged to the screen
from phenoai import logger
from phenoai import maker
logger.to_stream(lvl=0)
# Create AInalysis maker
m = maker.AInalysisMaker(
default_id="my_own_classifier",
location="./my_updated_first_ainalysis_09b",
versionnumber=1,
overwrite=True)
# Add meta information
m.set_about("Test AInalysis from example09b", "This AInalysis is created by the 08a example and serves no purpose other than showcasing how AInalyses are made. Only the meta information (this about file) should be different from the 08a AInalysis.")
m.add_author("Bob Stienen", "b.stienen@science.ru.nl")
m.add_author("Sascha Caron", "scaron@nikhef.nl")
# Define dependency versions (on top of used versions)
m.load("./my_first_ainalysis",
load_estimator=True,
load_data=True)
# Create AInalysis
m.make()
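# After m.make() completes, the target folder './my_updated_first_ainalysis_09b'
# should contain the reused estimator, data and configuration from the original
# AInalysis together with the updated about information (the exact folder layout
# is an assumption and is not verified here).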
| 36.701754
| 249
| 0.779637
|
a58c46b2d99ca6e427dc2ce8c5104d0613a012d2
| 27,398
|
py
|
Python
|
tests/test_serializer.py
|
daleione/django-rest-framework
|
f81ca786427db40b648b5bcc0e67044163215457
|
[
"BSD-3-Clause"
] | 4
|
2019-02-11T13:01:34.000Z
|
2020-10-22T08:39:57.000Z
|
tests/test_serializer.py
|
daleione/django-rest-framework
|
f81ca786427db40b648b5bcc0e67044163215457
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_serializer.py
|
daleione/django-rest-framework
|
f81ca786427db40b648b5bcc0e67044163215457
|
[
"BSD-3-Clause"
] | 2
|
2019-03-01T11:55:31.000Z
|
2019-07-07T09:58:16.000Z
|
import inspect
import pickle
import re
from collections import ChainMap
from collections.abc import Mapping
import pytest
from django.db import models
from rest_framework import exceptions, fields, relations, serializers
from rest_framework.fields import Field
from .models import (
ForeignKeyTarget, NestedForeignKeySource, NullableForeignKeySource
)
from .utils import MockObject
# Test serializer fields imports.
# -------------------------------
class TestFieldImports:
def is_field(self, name, value):
return (
isinstance(value, type) and
issubclass(value, Field) and
not name.startswith('_')
)
def test_fields(self):
msg = "Expected `fields.%s` to be imported in `serializers`"
field_classes = [
key for key, value
in inspect.getmembers(fields)
if self.is_field(key, value)
]
# sanity check
assert 'Field' in field_classes
assert 'BooleanField' in field_classes
for field in field_classes:
assert hasattr(serializers, field), msg % field
def test_relations(self):
msg = "Expected `relations.%s` to be imported in `serializers`"
field_classes = [
key for key, value
in inspect.getmembers(relations)
if self.is_field(key, value)
]
# sanity check
assert 'RelatedField' in field_classes
for field in field_classes:
assert hasattr(serializers, field), msg % field
# Tests for core functionality.
# -----------------------------
class TestSerializer:
def setup(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
self.Serializer = ExampleSerializer
def test_valid_serializer(self):
serializer = self.Serializer(data={'char': 'abc', 'integer': 123})
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 123}
assert serializer.data == {'char': 'abc', 'integer': 123}
assert serializer.errors == {}
def test_invalid_serializer(self):
serializer = self.Serializer(data={'char': 'abc'})
assert not serializer.is_valid()
assert serializer.validated_data == {}
assert serializer.data == {'char': 'abc'}
assert serializer.errors == {'integer': ['This field is required.']}
def test_invalid_datatype(self):
serializer = self.Serializer(data=[{'char': 'abc'}])
assert not serializer.is_valid()
assert serializer.validated_data == {}
assert serializer.data == {}
assert serializer.errors == {'non_field_errors': ['Invalid data. Expected a dictionary, but got list.']}
def test_partial_validation(self):
serializer = self.Serializer(data={'char': 'abc'}, partial=True)
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc'}
assert serializer.errors == {}
def test_empty_serializer(self):
serializer = self.Serializer()
assert serializer.data == {'char': '', 'integer': None}
def test_missing_attribute_during_serialization(self):
class MissingAttributes:
pass
instance = MissingAttributes()
serializer = self.Serializer(instance)
with pytest.raises(AttributeError):
serializer.data
def test_data_access_before_save_raises_error(self):
def create(validated_data):
return validated_data
serializer = self.Serializer(data={'char': 'abc', 'integer': 123})
serializer.create = create
assert serializer.is_valid()
assert serializer.data == {'char': 'abc', 'integer': 123}
with pytest.raises(AssertionError):
serializer.save()
def test_validate_none_data(self):
data = None
serializer = self.Serializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {'non_field_errors': ['No data provided']}
def test_serialize_chainmap(self):
data = ChainMap({'char': 'abc'}, {'integer': 123})
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 123}
assert serializer.errors == {}
def test_serialize_custom_mapping(self):
class SinglePurposeMapping(Mapping):
def __getitem__(self, key):
return 'abc' if key == 'char' else 123
def __iter__(self):
yield 'char'
yield 'integer'
def __len__(self):
return 2
serializer = self.Serializer(data=SinglePurposeMapping())
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 123}
assert serializer.errors == {}
def test_custom_to_internal_value(self):
"""
to_internal_value() is expected to return a dict, but subclasses may
return application specific type.
"""
class Point:
def __init__(self, srid, x, y):
self.srid = srid
self.coords = (x, y)
# Declares a serializer that converts data into an object
class NestedPointSerializer(serializers.Serializer):
longitude = serializers.FloatField(source='x')
latitude = serializers.FloatField(source='y')
def to_internal_value(self, data):
kwargs = super().to_internal_value(data)
return Point(srid=4326, **kwargs)
serializer = NestedPointSerializer(data={'longitude': 6.958307, 'latitude': 50.941357})
assert serializer.is_valid()
assert isinstance(serializer.validated_data, Point)
assert serializer.validated_data.srid == 4326
assert serializer.validated_data.coords[0] == 6.958307
assert serializer.validated_data.coords[1] == 50.941357
assert serializer.errors == {}
def test_iterable_validators(self):
"""
Ensure `validators` parameter is compatible with reasonable iterables.
"""
data = {'char': 'abc', 'integer': 123}
for validators in ([], (), set()):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField(validators=validators)
integer = serializers.IntegerField()
serializer = ExampleSerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == data
assert serializer.errors == {}
def raise_exception(value):
raise exceptions.ValidationError('Raised error')
for validators in ([raise_exception], (raise_exception,), {raise_exception}):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField(validators=validators)
integer = serializers.IntegerField()
serializer = ExampleSerializer(data=data)
assert not serializer.is_valid()
assert serializer.data == data
assert serializer.validated_data == {}
assert serializer.errors == {'char': [
exceptions.ErrorDetail(string='Raised error', code='invalid')
]}
class TestValidateMethod:
def test_non_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError('Non field error')
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'non_field_errors': ['Non field error']}
def test_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError({'char': 'Field error'})
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'char': ['Field error']}
class TestBaseSerializer:
def setup(self):
class ExampleSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
return {
'id': obj['id'],
'email': obj['name'] + '@' + obj['domain']
}
def to_internal_value(self, data):
name, domain = str(data['email']).split('@')
return {
'id': int(data['id']),
'name': name,
'domain': domain,
}
self.Serializer = ExampleSerializer
def test_abstract_methods_raise_proper_errors(self):
serializer = serializers.BaseSerializer()
with pytest.raises(NotImplementedError):
serializer.to_internal_value(None)
with pytest.raises(NotImplementedError):
serializer.to_representation(None)
with pytest.raises(NotImplementedError):
serializer.update(None, None)
with pytest.raises(NotImplementedError):
serializer.create(None)
def test_access_to_data_attribute_before_validation_raises_error(self):
serializer = serializers.BaseSerializer(data={'foo': 'bar'})
with pytest.raises(AssertionError):
serializer.data
def test_access_to_errors_attribute_before_validation_raises_error(self):
serializer = serializers.BaseSerializer(data={'foo': 'bar'})
with pytest.raises(AssertionError):
serializer.errors
def test_access_to_validated_data_attribute_before_validation_raises_error(self):
serializer = serializers.BaseSerializer(data={'foo': 'bar'})
with pytest.raises(AssertionError):
serializer.validated_data
def test_serialize_instance(self):
instance = {'id': 1, 'name': 'tom', 'domain': 'example.com'}
serializer = self.Serializer(instance)
assert serializer.data == {'id': 1, 'email': 'tom@example.com'}
def test_serialize_list(self):
instances = [
{'id': 1, 'name': 'tom', 'domain': 'example.com'},
{'id': 2, 'name': 'ann', 'domain': 'example.com'},
]
serializer = self.Serializer(instances, many=True)
assert serializer.data == [
{'id': 1, 'email': 'tom@example.com'},
{'id': 2, 'email': 'ann@example.com'}
]
def test_validate_data(self):
data = {'id': 1, 'email': 'tom@example.com'}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'id': 1,
'name': 'tom',
'domain': 'example.com'
}
def test_validate_list(self):
data = [
{'id': 1, 'email': 'tom@example.com'},
{'id': 2, 'email': 'ann@example.com'},
]
serializer = self.Serializer(data=data, many=True)
assert serializer.is_valid()
assert serializer.validated_data == [
{'id': 1, 'name': 'tom', 'domain': 'example.com'},
{'id': 2, 'name': 'ann', 'domain': 'example.com'}
]
class TestStarredSource:
"""
Tests for `source='*'` argument, which is often used for complex field or
nested representations.
For example:
nested_field = NestedField(source='*')
"""
data = {
'nested1': {'a': 1, 'b': 2},
'nested2': {'c': 3, 'd': 4}
}
def setup(self):
class NestedSerializer1(serializers.Serializer):
a = serializers.IntegerField()
b = serializers.IntegerField()
class NestedSerializer2(serializers.Serializer):
c = serializers.IntegerField()
d = serializers.IntegerField()
class NestedBaseSerializer(serializers.Serializer):
nested1 = NestedSerializer1(source='*')
nested2 = NestedSerializer2(source='*')
# nullable nested serializer testing
class NullableNestedSerializer(serializers.Serializer):
nested = NestedSerializer1(source='*', allow_null=True)
# nullable custom field testing
class CustomField(serializers.Field):
def to_representation(self, instance):
return getattr(instance, 'foo', None)
def to_internal_value(self, data):
return {'foo': data}
class NullableFieldSerializer(serializers.Serializer):
field = CustomField(source='*', allow_null=True)
self.Serializer = NestedBaseSerializer
self.NullableNestedSerializer = NullableNestedSerializer
self.NullableFieldSerializer = NullableFieldSerializer
def test_nested_validate(self):
"""
A nested representation is validated into a flat internal object.
"""
serializer = self.Serializer(data=self.data)
assert serializer.is_valid()
assert serializer.validated_data == {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
def test_nested_null_validate(self):
serializer = self.NullableNestedSerializer(data={'nested': None})
# validation should fail (but not error) since nested fields are required
assert not serializer.is_valid()
def test_nested_serialize(self):
"""
An object can be serialized into a nested representation.
"""
instance = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
serializer = self.Serializer(instance)
assert serializer.data == self.data
def test_field_validate(self):
serializer = self.NullableFieldSerializer(data={'field': 'bar'})
# validation should pass since no internal validation
assert serializer.is_valid()
assert serializer.validated_data == {'foo': 'bar'}
def test_field_null_validate(self):
serializer = self.NullableFieldSerializer(data={'field': None})
# validation should pass since no internal validation
assert serializer.is_valid()
assert serializer.validated_data == {'foo': None}
class TestIncorrectlyConfigured:
def test_incorrect_field_name(self):
class ExampleSerializer(serializers.Serializer):
incorrect_name = serializers.IntegerField()
class ExampleObject:
def __init__(self):
self.correct_name = 123
instance = ExampleObject()
serializer = ExampleSerializer(instance)
with pytest.raises(AttributeError) as exc_info:
serializer.data
msg = str(exc_info.value)
assert msg.startswith(
"Got AttributeError when attempting to get a value for field `incorrect_name` on serializer `ExampleSerializer`.\n"
"The serializer field might be named incorrectly and not match any attribute or key on the `ExampleObject` instance.\n"
"Original exception text was:"
)
class TestNotRequiredOutput:
def test_not_required_output_for_dict(self):
"""
'required=False' should allow a dictionary key to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
assert serializer.data == {'included': 'abc'}
def test_not_required_output_for_object(self):
"""
'required=False' should allow an object attribute to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
def create(self, validated_data):
return MockObject(**validated_data)
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
serializer.save()
assert serializer.data == {'included': 'abc'}
class TestDefaultOutput:
def setup(self):
class ExampleSerializer(serializers.Serializer):
has_default = serializers.CharField(default='x')
has_default_callable = serializers.CharField(default=lambda: 'y')
no_default = serializers.CharField()
self.Serializer = ExampleSerializer
def test_default_used_for_dict(self):
"""
'default="something"' should be used if dictionary key is missing from input.
"""
serializer = self.Serializer({'no_default': 'abc'})
assert serializer.data == {'has_default': 'x', 'has_default_callable': 'y', 'no_default': 'abc'}
def test_default_used_for_object(self):
"""
'default="something"' should be used if object attribute is missing from input.
"""
instance = MockObject(no_default='abc')
serializer = self.Serializer(instance)
assert serializer.data == {'has_default': 'x', 'has_default_callable': 'y', 'no_default': 'abc'}
def test_default_not_used_when_in_dict(self):
"""
'default="something"' should not be used if dictionary key is present in input.
"""
serializer = self.Serializer({'has_default': 'def', 'has_default_callable': 'ghi', 'no_default': 'abc'})
assert serializer.data == {'has_default': 'def', 'has_default_callable': 'ghi', 'no_default': 'abc'}
def test_default_not_used_when_in_object(self):
"""
'default="something"' should not be used if object attribute is present in input.
"""
instance = MockObject(has_default='def', has_default_callable='ghi', no_default='abc')
serializer = self.Serializer(instance)
assert serializer.data == {'has_default': 'def', 'has_default_callable': 'ghi', 'no_default': 'abc'}
def test_default_for_dotted_source(self):
"""
'default="something"' should be used when a traversed attribute is missing from input.
"""
class Serializer(serializers.Serializer):
traversed = serializers.CharField(default='x', source='traversed.attr')
assert Serializer({}).data == {'traversed': 'x'}
assert Serializer({'traversed': {}}).data == {'traversed': 'x'}
assert Serializer({'traversed': None}).data == {'traversed': 'x'}
assert Serializer({'traversed': {'attr': 'abc'}}).data == {'traversed': 'abc'}
def test_default_for_multiple_dotted_source(self):
class Serializer(serializers.Serializer):
c = serializers.CharField(default='x', source='a.b.c')
assert Serializer({}).data == {'c': 'x'}
assert Serializer({'a': {}}).data == {'c': 'x'}
assert Serializer({'a': None}).data == {'c': 'x'}
assert Serializer({'a': {'b': {}}}).data == {'c': 'x'}
assert Serializer({'a': {'b': None}}).data == {'c': 'x'}
assert Serializer({'a': {'b': {'c': 'abc'}}}).data == {'c': 'abc'}
# Same test using model objects to exercise both paths in
# rest_framework.fields.get_attribute() (#5880)
class ModelSerializer(serializers.Serializer):
target = serializers.CharField(default='x', source='target.target.name')
a = NestedForeignKeySource(name="Root Object", target=None)
assert ModelSerializer(a).data == {'target': 'x'}
b = NullableForeignKeySource(name="Intermediary Object", target=None)
a.target = b
assert ModelSerializer(a).data == {'target': 'x'}
c = ForeignKeyTarget(name="Target Object")
b.target = c
assert ModelSerializer(a).data == {'target': 'Target Object'}
def test_default_for_nested_serializer(self):
class NestedSerializer(serializers.Serializer):
a = serializers.CharField(default='1')
c = serializers.CharField(default='2', source='b.c')
class Serializer(serializers.Serializer):
nested = NestedSerializer()
assert Serializer({'nested': None}).data == {'nested': None}
assert Serializer({'nested': {}}).data == {'nested': {'a': '1', 'c': '2'}}
assert Serializer({'nested': {'a': '3', 'b': {}}}).data == {'nested': {'a': '3', 'c': '2'}}
assert Serializer({'nested': {'a': '3', 'b': {'c': '4'}}}).data == {'nested': {'a': '3', 'c': '4'}}
def test_default_for_allow_null(self):
"""
Without an explicit default, allow_null implies default=None when serializing. #5518 #5708
"""
class Serializer(serializers.Serializer):
foo = serializers.CharField()
bar = serializers.CharField(source='foo.bar', allow_null=True)
optional = serializers.CharField(required=False, allow_null=True)
# allow_null=True should imply default=None when serializing:
assert Serializer({'foo': None}).data == {'foo': None, 'bar': None, 'optional': None, }
class TestCacheSerializerData:
def test_cache_serializer_data(self):
"""
Caching serializer data with pickle will drop the serializer info,
but does preserve the data itself.
"""
class ExampleSerializer(serializers.Serializer):
field1 = serializers.CharField()
field2 = serializers.CharField()
serializer = ExampleSerializer({'field1': 'a', 'field2': 'b'})
pickled = pickle.dumps(serializer.data)
data = pickle.loads(pickled)
assert data == {'field1': 'a', 'field2': 'b'}
class TestDefaultInclusions:
def setup(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField(default='abc')
integer = serializers.IntegerField()
self.Serializer = ExampleSerializer
def test_default_should_included_on_create(self):
serializer = self.Serializer(data={'integer': 456})
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 456}
assert serializer.errors == {}
def test_default_should_be_included_on_update(self):
instance = MockObject(char='def', integer=123)
serializer = self.Serializer(instance, data={'integer': 456})
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 456}
assert serializer.errors == {}
def test_default_should_not_be_included_on_partial_update(self):
instance = MockObject(char='def', integer=123)
serializer = self.Serializer(instance, data={'integer': 456}, partial=True)
assert serializer.is_valid()
assert serializer.validated_data == {'integer': 456}
assert serializer.errors == {}
class TestSerializerValidationWithCompiledRegexField:
def setup(self):
class ExampleSerializer(serializers.Serializer):
name = serializers.RegexField(re.compile(r'\d'), required=True)
self.Serializer = ExampleSerializer
def test_validation_success(self):
serializer = self.Serializer(data={'name': '2'})
assert serializer.is_valid()
assert serializer.validated_data == {'name': '2'}
assert serializer.errors == {}
class Test2555Regression:
def test_serializer_context(self):
class NestedSerializer(serializers.Serializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# .context should not cache
self.context
class ParentSerializer(serializers.Serializer):
nested = NestedSerializer()
serializer = ParentSerializer(data={}, context={'foo': 'bar'})
assert serializer.context == {'foo': 'bar'}
assert serializer.fields['nested'].context == {'foo': 'bar'}
class Test4606Regression:
def setup(self):
class ExampleSerializer(serializers.Serializer):
name = serializers.CharField(required=True)
choices = serializers.CharField(required=True)
self.Serializer = ExampleSerializer
def test_4606_regression(self):
serializer = self.Serializer(data=[{"name": "liz"}], many=True)
with pytest.raises(serializers.ValidationError):
serializer.is_valid(raise_exception=True)
class TestDeclaredFieldInheritance:
def test_declared_field_disabling(self):
class Parent(serializers.Serializer):
f1 = serializers.CharField()
f2 = serializers.CharField()
class Child(Parent):
f1 = None
class Grandchild(Child):
pass
assert len(Parent._declared_fields) == 2
assert len(Child._declared_fields) == 1
assert len(Grandchild._declared_fields) == 1
def test_meta_field_disabling(self):
# Declaratively setting a field on a child class will *not* prevent
# the ModelSerializer from generating a default field.
class MyModel(models.Model):
f1 = models.CharField(max_length=10)
f2 = models.CharField(max_length=10)
class Parent(serializers.ModelSerializer):
class Meta:
model = MyModel
fields = ['f1', 'f2']
class Child(Parent):
f1 = None
class Grandchild(Child):
pass
assert len(Parent().get_fields()) == 2
assert len(Child().get_fields()) == 2
assert len(Grandchild().get_fields()) == 2
def test_multiple_inheritance(self):
class A(serializers.Serializer):
field = serializers.CharField()
class B(serializers.Serializer):
field = serializers.IntegerField()
class TestSerializer(A, B):
pass
fields = {
name: type(f) for name, f
in TestSerializer()._declared_fields.items()
}
assert fields == {
'field': serializers.CharField,
}
def test_field_ordering(self):
class Base(serializers.Serializer):
f1 = serializers.CharField()
f2 = serializers.CharField()
class A(Base):
f3 = serializers.IntegerField()
class B(serializers.Serializer):
f3 = serializers.CharField()
f4 = serializers.CharField()
class TestSerializer(A, B):
f2 = serializers.IntegerField()
f5 = serializers.CharField()
fields = {
name: type(f) for name, f
in TestSerializer()._declared_fields.items()
}
# `IntegerField`s should be the 'winners' in field name conflicts
        # - `TestSerializer.f2` should override `Base.f2`
# - `A.f3` should override `B.f3`
assert fields == {
'f1': serializers.CharField,
'f2': serializers.IntegerField,
'f3': serializers.IntegerField,
'f4': serializers.CharField,
'f5': serializers.CharField,
}
| 37.27619
| 131
| 0.615921
|
293c937bece2236f515746eb532ad2b9cde23217
| 25,319
|
py
|
Python
|
textrenderer/corpus/chn_corpus.py
|
Sand0001/OCR_textrender_jap_chn_eng
|
87a01946bb8cd5229d2babcdf42a18df5b3e561f
|
[
"MIT"
] | null | null | null |
textrenderer/corpus/chn_corpus.py
|
Sand0001/OCR_textrender_jap_chn_eng
|
87a01946bb8cd5229d2babcdf42a18df5b3e561f
|
[
"MIT"
] | null | null | null |
textrenderer/corpus/chn_corpus.py
|
Sand0001/OCR_textrender_jap_chn_eng
|
87a01946bb8cd5229d2babcdf42a18df5b3e561f
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
from textrenderer.corpus.corpus import Corpus
class DIPCorpus:
def __init__(self):
self.content = ''
self.language = None
self.eng_whitespace_pos_list = []
self.low_char_index_dct = {}
self.low_charset_level_list = []
class ChnCorpus(Corpus):
def strQ2B(self, ustring):
"""全角转半角"""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
            if inside_code == 12288:  # a full-width space converts directly to an ASCII space
inside_code = 32
            elif (inside_code >= 65281 and inside_code <= 65374):  # other full-width characters (except space) shift by a fixed offset
inside_code -= 65248
rstring += chr(inside_code)
return rstring
def iseng(self, line):
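        # Heuristic: treat the line as English when at least half of the
        # inspected prefix consists of ASCII letters.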
        # the corpus may be one very long line; only inspect the first 30 characters
        line = line[0:30]
alpha_num = 0
for c in line:
if c <= 'z' and c >= 'a' or c >= 'A' and c <= 'Z':
alpha_num += 1
if alpha_num * 2 >= len(line):
return True
return False
def isalphnum(self, c):
if c <= 'z' and c >= 'a' or c >= 'A' and c <= 'Z':
return True
if c <= '9' and c >= '0':
return True
if c == '.' or c == ',':
return True
return False
def ischinese(self, word):
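        # True if the string contains at least one CJK unified ideograph.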
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
return True
return False
def prob(self, probability):
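        # Returns True with roughly the given probability; randint(0, 100) is
        # inclusive on both ends, so this is a close approximation rather than
        # an exact Bernoulli draw.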
r = random.randint(0, 100)
#print ("Prob : ", r)
if r <= probability * 100:
return True
else:
return False
def load_balanced_sample(self):
self.single_words_list = []
for line in open("./data/corpus/singleword.dat"):
parts = line.strip('\r\n ').split(' ')
if parts[0] not in self.charsets:
continue
self.single_words_list.append(parts[0])
print ("Load Single Word List : ", len(self.single_words_list))
def load_subscript(self):
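        # The '▵' prefix appears to mark superscript glyphs and '▿' subscript
        # glyphs in the label charset (an assumption inferred from the up/down
        # list names built below).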
self.up_subscript_list = []
self.down_subscript_list = []
for line in open('./data/corpus/suscripts.dat'):
parts = line.strip('\r\n ').split(' ')[0]
#print(parts)
if parts not in self.charsets:
print(parts)
continue
if '▵' in parts:
self.up_subscript_list.append(parts)
elif '▿' in parts:
self.down_subscript_list.append(parts)
self.scripts_symbol = ['▵+', '▿+', '▵-', '▿-', '▵=', '▿=']
self.super_scripts_num = ['▵(' + i + '▵)' for i in self.up_subscript_list if
('▿' not in i and i != '▵(' and i != '▵)' and i != '▵=' and i != '▵-' and i != '▵+')]
self.sub_scripts_num = ['▿(' + i + '▿)' for i in self.down_subscript_list if
('▵' not in i and i != '▿(' and i != '▿)' and i != '▿=' and i != '▿-' and i != '▿+')]
self.super_scripts_num_1 = [i + '▵+' for i in self.up_subscript_list if
(
'▿' not in i and i != '▵(' and i != '▵)' and i != '▵=' and i != '▵-' and i != '▵+')]
self.super_scripts_num_2 = [i + '▵-' for i in self.up_subscript_list if
(
'▿' not in i and i != '▵(' and i != '▵)' and i != '▵=' and i != '▵-' and i != '▵+')]
self.super_scripts_num_3 = ['10▵-▵' + str(i) for i in range(1, 11)]
# print(self.subscript_list)
print("Load up_subscripts List : ", len(self.up_subscript_list))
print("Load down_subscripts List : ", len(self.down_subscript_list))
def load_charset_level(self):
self.low_charset_level = set()
self.mid_charset_level = set()
self.high_charset_level = set()
for line in open("./data/chars/high_charset"):
            # the charset file itself uses a space as the separator
line = line.strip('\r\n')
idx = line.rindex(' ')
if idx <= 0:
continue
#self.low_charset_level.append()
#self.mid_charset_level.append()
self.high_charset_level.add(line[0 : idx])
#self.single_words_list.append(parts[0])
print ("Load high_charset List : ", len(self.high_charset_level))
for line in open("./data/chars/mid_charset"):
line = line.strip('\r\n')
idx = line.rindex(' ')
if idx <= 0:
continue
#self.low_charset_level.append()
#self.mid_charset_level.append()
self.mid_charset_level.add(line[0 : idx])
#self.single_words_list.append(parts[0])
print ("Load mid_charset List : ", len(self.mid_charset_level))
for line in open("./data/chars/low_charset"):
line = line.strip('\r\n')
idx = line.rindex(' ')
if idx <= 0:
continue
#self.low_charset_level.append()
#self.mid_charset_level.append()
self.low_charset_level.add(line[0 : idx])
#self.single_words_list.append(parts[0])
print ("Load low_charset_level List : ", len(self.low_charset_level))
def load(self):
"""
Load one corpus file as one line , and get random {self.length} words as result
"""
self.load_corpus_path()
self.load_charset_level()
self.load_balanced_sample()
self.load_subscript()
self.has_been_created_text = {}
        # note: several corpus files are loaded and combined here
#self.eng_whitespace_pos_list_dct = {}
filter_corpus_path = []
for i in self.corpus_path:
#if 'eng' in i.split('/')[-1]:
if 'chn' in i.split('/')[-1] or 'eng' in i.split('/')[-1] :
filter_corpus_path.append(i)
self.corpus_path = filter_corpus_path
for i, p in enumerate(self.corpus_path):
print_end = '\n' if i == len(self.corpus_path) - 1 else '\r'
print("Loading chn corpus: {}/{}".format(i + 1, len(self.corpus_path)), end=print_end)
with open(p, encoding='utf-8',errors='ignore') as f:
data = f.readlines()
lines = []
for line in data:
line_striped = line.strip('\r\n ')
#line_striped = line.strip()
if len(line_striped) < 5:
continue
line_striped = line_striped.replace('\u3000', ' ')
line_striped = line_striped.replace(' ', '')
line_striped = line_striped.replace("\00", "")
line_striped = line_striped.replace("()", "")
line_striped = line_striped.replace("()", "")
line_striped = line_striped.replace("[]", "")
line_striped = line_striped.replace("「」", "")
if len(line_striped) < 5:
continue
#line_striped = self.strQ2B(line_striped)
if line_striped != u'' and len(line.strip()) > 1:
lines.append(line_striped)
            # merge all lines into a single line
split_chars = ['']
if len(lines) > 0:
if self.iseng(lines[0]):
split_chars = [' ']
splitchar = random.choice(split_chars)
whole_line = ' '.join(lines)
'''
total_len = 0
for line in lines:
filtered = ''.join(filter(lambda x: x in self.charsets, line))
                # skip lines shorter than 10 characters
if len(filtered ) < 10:
continue
self.corpus.append(filtered)
total_len += len(filtered)
self.probability = [len(l) / float(total_len) for l in self.corpus]
'''
            # further filtering is applied when encoding in crnn/libs/label_converter
#whole_line = ''.join(filter(lambda x: x in self.charsets, whole_line))
#print (whole_line[0 : 500])
if len(whole_line) > self.length:
                # determine the corpus language
                # for English, record the position of every whitespace character
eng_whitespace_pos_list = []
language = 'chn'
if self.iseng(whole_line):
language = 'eng'
for index in range(0, len(whole_line)):
if whole_line[index] == ' ':
eng_whitespace_pos_list.append(index)
if 'script' in p:
language = 'eng_script'
                # record the position of every rare (low-frequency) character
# self.mid_char_index_dct = {}
low_char_index_dct = {}
for index in range(0, len(whole_line)):
c = whole_line[index]
                    # skip characters that are not in the rare charset
if c not in self.low_charset_level:
continue
if c in low_char_index_dct:
low_char_index_dct[c].append(index)
else:
low_char_index_dct[c] = [index]
low_charset_level_list = [e for e in low_char_index_dct]
corpus = DIPCorpus()
corpus.content = whole_line
corpus.eng_whitespace_pos_list = eng_whitespace_pos_list
corpus.low_char_index_dct = low_char_index_dct
corpus.low_charset_level_list = low_charset_level_list
corpus.language = language
self.corpus.append(corpus)
    # try to find a whole-word boundary so that words are not truncated
def get_content_of_len_from_pos(self, content, length, pos, max_step = 6):
word = ''
cur_len = 0
start = pos
#rand_len = length + (random.randint(0, 8) - 4)
#length = rand_len
while cur_len < length and start < len(content):
c = content[start]
if self.ischinese(c):
cur_len += 2
else:
cur_len += 1
word += content[start]
start += 1
isalpha = lambda x: x>= 'a' and x<='z' or x >= 'A' and x <= 'Z'
        # if the sample ends in the middle of a word, keep scanning until a space so the word stays intact
if isalpha(word[len(word) - 1]):
while cur_len < length + max_step and start < len(content):
c = content[start]
if c == ' ':
break
if self.ischinese(c):
cur_len += 2
else:
cur_len += 1
word += content[start]
start += 1
word = word.strip(' ')
return word
    # extract a snippet from one corpus
def choose_line(self, corpus):
line = corpus.content
language = corpus.language
eng_whitespace_pos_list = corpus.eng_whitespace_pos_list
length = self.length
#if self.iseng(line):
        # a Chinese character counts as length 2, other characters as 1
length = 2 * self.length
        # try to end on a whole-word boundary, scanning at most max_step extra characters
max_step = 5
if language == 'eng':
pos = np.random.randint(0, len(eng_whitespace_pos_list) - 1)
start = eng_whitespace_pos_list[pos]
start += 1
else:
start = np.random.randint(0, len(line) - length - max_step)
length = length + (random.randint(0, 8) - 4)
return self.get_content_of_len_from_pos(line, length, start, max_step)
    # decide whether this sample consists only of high-frequency characters
def balanced_sample(self, candidate_word, language):
        # check whether every Chinese character is a high-frequency one
all_high_freq = True
if language == 'chn':
for c in candidate_word:
if c not in self.high_charset_level:
all_high_freq = False
break
else:
return True
if all_high_freq:
return False
return True
def get_sample(self, img_index):
        # each gen_word call picks a random corpus file and samples roughly word_length characters
        # occasionally emit isolated single characters, especially very low-frequency ones
r = random.randint(0, 8)
#r = 1
#print ("GET SAMPLE ", r, len(self.has_been_created_text))
#print (r, len(self.single_words_list))
#if False and len(self.single_words_list) > 0 and self.prob(0.02):
if len(self.single_words_list) > 0 and self.prob(0.02):
word = ''
for i in range(0, self.length):
r_i = random.randint(0, len(self.single_words_list) - 1)
word += self.single_words_list[r_i]
            # skip words that have already been generated
if word in self.has_been_created_text:
#print ("Abandon has_been_created_text word : ", word)
raise Exception("single_words, already has been created")
return None
self.has_been_created_text[word] = 1
return word, 'chn'
corpus = random.choice(self.corpus)
        # reduce the proportion of English samples
if corpus.language == 'eng' and self.prob(0.2):
corpus = random.choice(self.corpus)
        # sample around positions where rare characters occur
        # (with lowered probability)
if False and corpus.language == 'chn' and len(corpus.low_charset_level_list) > 0 and self.prob(0.25):
line = corpus.content
r_i = random.randint(0, len(corpus.low_charset_level_list) - 1)
index_list = corpus.low_char_index_dct[ corpus.low_charset_level_list[r_i]]
#print ("Low Word Index_List", index_list)
r_list_i = index_list[random.randint(0, len(index_list) - 1)]
            # pin the start position so deduplication works; otherwise many near-duplicate samples differing by one character appear
#r_start = random.randint(r_list_i - self.length + 1, r_list_i)
r_start = r_list_i - 3
#print ("Low Word Start : ", r_start)
if r_start >= 0 and r_start + self.length < len(line):
word = self.get_content_of_len_from_pos(line, 2 * self.length, r_start)
#word = line [r_start : r_start + self.length]
print ("Choose Low Word : ", corpus.low_charset_level_list[r_i], " Choose : ", word)
if word in self.has_been_created_text:
print ("Abandon has_been_created_text word : ", word)
#return None
self.has_been_created_text[word] = 1
return word, corpus.language
else:
return None
language = corpus.language
retry_num = 10
OK = False
for i in range(0, retry_num):
word = self.choose_line(corpus)
#print ("try : ", word)
if word in self.has_been_created_text:
#print ("choose already exists : ", word)
continue
OK = True
break
'''
            # balance the samples
if self.balanced_sample(word, language):
OK = True
#print ("Found Balanced word : ", word)
break
else:
#print ("Found unBalanced word : ", word)
                # keep an unbalanced sample with 70% probability
if self.prob(0.75):
OK = True
#print ("preserve unBalanced word : ", word)
break
else:
pass
#print ("Abandon unBalanced word : ", word)
            # if all characters are high-frequency, keep the sample with some probability
'''
if False == OK:
#print ("failed to find sample after tried : ", retry_num)
#raise Exception("Failed to found sample")
return None
self.has_been_created_text[word] = 1
#print(language)
if corpus.language == 'chn' and self.prob(0.006) and self.ischinese(word[-1]) :
            str_list_right = '》!?〉】〕」:)】。、'
prob = [0.1,0.08,0.08,0.04,0.04,0.02,0.01,0.08,0.05,0.12,0.2,0.18]
tmp_word_1 = np.random.choice(list(str_list_right),1,p=prob)
#tmp_word_1= random.choice(list(str_list_right))
word = word.strip(' ')+tmp_word_1[0]
if corpus.language == 'chn' and self.prob(0.005) and self.ischinese(word[0]) :
str_list_left = '《〈【〔「('
prob = [0.22,0.1,0.1,0.15,0.05,0.38]
#str_list_right = '⦆!?〉】〕」‘’:“”】]。、'
tmp_word_1 = np.random.choice(list(str_list_left),1,p=prob)
#tmp_word_1= random.choice(list(str_list_left))
word = tmp_word_1[0] + word.strip(' ')
if language == 'eng':
            # with a small probability, uppercase the whole sample
if self.prob(0.02):
word = word.upper()
            # TODO: with a small probability, capitalize only the first letter
#if self.prob(0.02):
# word =
#print ("Choose Word : [", word , "]" , len(word), language)
#word = line[start:start + length]
        # the returned text must not begin or end with a space
return word.strip(' '), language
def get_scripts(self,language,on_left = False):
if language == 'chn' or language == 'jap':
sp_symbol = random.choice(['①', '②', '③', '④', '⑤', '⑥', '⑦', '⑧', '⑨', '⑩', '®', '©', '*', '∞', '※'])
super_or_sub = random.choice(['▿', '▵'])
add_scripts = super_or_sub + sp_symbol
return add_scripts
scripts = random.choice([self.down_subscript_list, self.up_subscript_list])
scripts_word = [' 1▵s▵t', ' 3▵r▵d', ' 2▵n▵d', ' 4▵t▵h', '▵T▵M', '▵t▵h']
gen_method = np.random.randint(0, 9)
if gen_method == 1:
add_scripts = random.choice(self.sub_scripts_num)
elif gen_method == 2: # self.super_scripts_num_1
add_scripts = random.choice(self.super_scripts_num)
elif gen_method == 3:
add_scripts = random.choice(self.super_scripts_num_1)
elif gen_method == 4:
add_scripts = random.choice(self.super_scripts_num_2)
elif gen_method == 5:
add_scripts = random.choice(scripts_word)
elif gen_method == 6:
add_scripts = random.choice(self.super_scripts_num_3)
elif gen_method == 7:
# add_scripts = random.choice(['▿©', '▿®', '▵©', '▵®'])
sp_symbol = random.choice(['①', '②', '③', '④', '⑤', '⑥', '⑦', '⑧', '⑨', '⑩', '®', '©', '*', '∞', '※'])
super_or_sub = random.choice(['▿', '▵'])
add_scripts = super_or_sub + sp_symbol
else:
add_scripts = ''
num_list = [1, 1, 2, 2, 3, 4]
        num = random.choice(num_list)  # randomly choose how many consecutive scripts to place
one_more_time = True
for i in range(num):
tmp_script = np.random.choice(scripts)
if ')' in tmp_script and ('(' not in add_scripts) and (i != num - 1):
continue
if tmp_script in self.scripts_symbol:
one_more_time = False
if one_more_time:
add_scripts += np.random.choice(scripts)
else:
if tmp_script not in self.scripts_symbol:
add_scripts += np.random.choice(scripts)
if add_scripts.count('▵(') % 2 == 1:
add_scripts += '▵)'
if add_scripts.count('▿(') % 2 == 1:
add_scripts += '▿)'
if on_left:
add_scripts = add_scripts.replace('▵+','')
add_scripts = add_scripts.replace('▵-', '')
add_scripts = add_scripts.replace('▿+', '')
add_scripts = add_scripts.replace('▿-', '')
if self.prob(0.85) and '▿' in add_scripts:
add_scripts = ''
return add_scripts
def get_scripts_index_list(self,word_list):
subscript_index_list = []
        for i in range(np.random.randint(3)):  # pick random positions to place scripts
tmp_i = np.random.randint(0, len(word_list))
            if tmp_i not in subscript_index_list:  # avoid duplicate positions
subscript_index_list.append(tmp_i)
return subscript_index_list
def get_word_list_index_value(self,word_list,subscript_index,language):
if np.random.randint(1, 7) == 1:
add_scripts = self.get_scripts(language,on_left=True)
return add_scripts + word_list[subscript_index]
else:
add_scripts = self.get_scripts(language)
return word_list[subscript_index] + add_scripts
def get_sample_add_script(self, img_index):
        # each gen_word call picks a random corpus file and samples roughly word_length characters
        # occasionally emit isolated single characters, especially very low-frequency ones
#r = random.randint(0, 30)
#print (r, len(self.single_words_list))
if self.prob(0.02) and len(self.single_words_list) > 0:
word = ''
for i in range(0, self.length):
r_i = random.randint(0, len(self.single_words_list) - 1)
word += self.single_words_list[r_i]
return word, 'chn'
language = np.random.choice(['chn','eng', 'eng_script'], p=[0.55,0.4,0.05])
for item in self.corpus:
if item.language == language:
corpus = item
break
if False and corpus.language == 'chn' and len(corpus.low_charset_level_list) > 0 and self.prob(0.25):
line = corpus.content
r_i = random.randint(0, len(corpus.low_charset_level_list) - 1)
index_list = corpus.low_char_index_dct[ corpus.low_charset_level_list[r_i]]
#print ("Low Word Index_List", index_list)
r_list_i = index_list[random.randint(0, len(index_list) - 1)]
            # pin the start position so deduplication works; otherwise many near-duplicate samples differing by one character appear
#r_start = random.randint(r_list_i - self.length + 1, r_list_i)
r_start = r_list_i - 3
#print ("Low Word Start : ", r_start)
if r_start >= 0 and r_start + self.length < len(line):
word = self.get_content_of_len_from_pos(line, 2 * self.length, r_start)
#word = line [r_start : r_start + self.length]
print ("Choose Low Word : ", corpus.low_charset_level_list[r_i], " Choose : ", word)
if word in self.has_been_created_text:
print ("Abandon has_been_created_text word : ", word)
#return None
self.has_been_created_text[word] = 1
return word, corpus.language
else:
return None
word = self.choose_line(corpus)
language = corpus.language
if language == 'eng' and self.prob(0.04):
            # with a small probability, uppercase the whole sample
word = word.upper()
        # with a small probability, append a full-width punctuation mark at the end of the sentence
if corpus.language == 'chn' and self.prob(0.006) and self.ischinese(word[-1]) :
str_list_right = '》!?〉】〕」:)】。、'
prob = [0.1,0.08,0.08,0.04,0.04,0.02,0.01,0.08,0.05,0.12,0.20,0.18]
tmp_word_1 = np.random.choice(list(str_list_right),1,p=prob)
#tmp_word_1= random.choice(list(str_list_right))
word = word.strip(' ')+tmp_word_1[0]
if corpus.language == 'chn' and self.prob(0.005) and self.ischinese(word[0]) :
str_list_left = '《〈【〔「('
prob = [0.22,0.1,0.1,0.15,0.05,0.38]
#str_list_right = '⦆!?〉】〕」‘’:“”】]。、'
tmp_word_1 = np.random.choice(list(str_list_left),1,p=prob)
#tmp_word_1= random.choice(list(str_list_left))
word = tmp_word_1[0] + word.strip(' ')
if (language == 'eng' and self.prob(0.11)) or (language == 'chn' and self.prob(0.01)):
#if language == 'eng' and self.prob(0.02):
# print(language)
            # with some probability, attach sub/superscript marks to random characters in the word
if self.prob(0.2):
word_list = list(word)
subscript_index_list = []
                for i in range(np.random.randint(3)):  # pick random positions to place scripts
tmp_i = np.random.randint(0,len(word_list))
                    if tmp_i not in subscript_index_list:  # avoid duplicate positions
subscript_index_list.append(tmp_i)
for subscript_index in subscript_index_list:
if subscript_index + 1 < len(word_list):
                        if word_list[subscript_index] != ' ' and word_list[subscript_index + 1] != ' ':  # neither this character nor the next may be a space
word_list[subscript_index] = self.get_word_list_index_value(word_list, subscript_index,language)
else:
word_list[subscript_index] = self.get_word_list_index_value(word_list, subscript_index ,language)
word = ''.join(word_list)
else:
word_list = word.split(' ')
subscript_index_list = self.get_scripts_index_list(word_list)
for subscript_index in subscript_index_list:
if word_list[subscript_index] != '':
aaa = ',.!;:'
if word_list[subscript_index][-1] in aaa :
if self.prob(0.1):
word_list[subscript_index] = self.get_word_list_index_value(word_list,subscript_index,language)
else:
word_list[subscript_index] = self.get_word_list_index_value(word_list, subscript_index,language)
word = ' '.join(word_list)
if word[-1] =='▵' or word[-1] =='▿':
word = word[:-1]
if language == 'eng_script':
language = 'eng'
return word.strip(' '), language
| 40.903069
| 132
| 0.51649
|
cc2afa7ba12ba65b1baffecdfbd0a1d7c6b9a9f0
| 37,584
|
py
|
Python
|
tensorflow/python/ops/stateless_random_ops.py
|
computationalartist/tensorflow
|
b89cf636c412abdff53b3e8f201bde671c92209d
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/ops/stateless_random_ops.py
|
computationalartist/tensorflow
|
b89cf636c412abdff53b3e8f201bde671c92209d
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/ops/stateless_random_ops.py
|
computationalartist/tensorflow
|
b89cf636c412abdff53b3e8f201bde671c92209d
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input."""
import enum
import numpy as np
import six
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("StatelessMultinomial")
ops.NotDifferentiable("StatelessRandomBinomial")
ops.NotDifferentiable("StatelessRandomNormal")
ops.NotDifferentiable("StatelessRandomPoisson")
ops.NotDifferentiable("StatelessRandomUniform")
ops.NotDifferentiable("StatelessRandomUniformInt")
ops.NotDifferentiable("StatelessRandomUniformFullInt")
ops.NotDifferentiable("StatelessTruncatedNormal")
ops.NotDifferentiable("StatelessRandomNormalV2")
ops.NotDifferentiable("StatelessRandomUniformV2")
ops.NotDifferentiable("StatelessRandomUniformIntV2")
ops.NotDifferentiable("StatelessRandomUniformFullIntV2")
ops.NotDifferentiable("StatelessTruncatedNormalV2")
@tf_export("random.Algorithm", "random.experimental.Algorithm")
class Algorithm(enum.Enum):
# The numbers here must match framework/rng_alg.h
PHILOX = 1
THREEFRY = 2
AUTO_SELECT = 3
def convert_alg_to_int(alg):
"""Converts algorithm to an integer.
Args:
    alg: can be one of these types: integer, Algorithm, Tensor, string. Allowed
      strings are "philox", "threefry" and "auto_select".
Returns:
An integer, unless the input is a Tensor in which case a Tensor is returned.
"""
if isinstance(alg, six.integer_types):
return alg
if isinstance(alg, Algorithm):
return alg.value
if isinstance(alg, ops.Tensor):
return alg
if isinstance(alg, str):
if alg == "philox":
return Algorithm.PHILOX.value
elif alg in ("threefry", "three-fry", "three_fry"):
return Algorithm.THREEFRY.value
elif alg in ("autoselect", "auto-select", "auto_select"):
return Algorithm.AUTO_SELECT.value
else:
raise ValueError(
f"Argument `alg` got unsupported string value {alg}. Supported "
f"string values are 'philox' for the Philox algorithm, 'threefry' "
f"for the ThreeFry algorithm, and 'auto_select' for auto-selection.")
else:
raise TypeError(
f"Can't convert argument `alg` (of value {alg} and type {type(alg)}) "
f"to int.")
def _resolve_alg(alg):
if alg == Algorithm.AUTO_SELECT.value:
return gen_stateless_random_ops_v2.stateless_random_get_alg()
return alg
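# Hedged usage sketch, not part of the original module: illustrates the mapping
# performed by `convert_alg_to_int` above. It is never called by library code
# and uses only names already defined in this file.
def _convert_alg_to_int_examples():
  """Illustrative only; returns the integer codes for three accepted inputs."""
  return (convert_alg_to_int("philox"),            # 1 == Algorithm.PHILOX.value
          convert_alg_to_int(Algorithm.THREEFRY),  # 2 == Algorithm.THREEFRY.value
          convert_alg_to_int(3))                   # plain ints pass through unchanged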
def _get_key_counter(seed, alg):
"""Calculates the key and counter to pass to raw RNG ops.
This function calculates the key and counter that will be passed to
the raw RNG ops like `StatelessRandomUniformV2`. Depending on the
input `alg`, the key and counter may be scrambled or copied from
`seed`. If `alg` is `"auto_select"`, the key and counter will be
determined at runtime based on device type.
Args:
seed: An integer tensor of shape [2]. The seed to calculate the
key and counter from.
alg: The RNG algorithm. See `tf.random.stateless_uniform` for an
explanation.
Returns:
A pair (key, counter) suitable for V2 stateless RNG ops like
`StatelessRandomUniformV2`.
"""
if alg == Algorithm.AUTO_SELECT.value:
key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(
seed)
elif alg == Algorithm.PHILOX.value:
key, counter = _philox_scramble_seed(seed)
elif alg == Algorithm.THREEFRY.value:
key = array_ops.reshape(
uint32s_to_uint64(math_ops.cast(seed, dtypes.uint32)), [1])
counter = array_ops.zeros([1], dtypes.uint64)
else:
raise ValueError(
f"Argument `alg` got unsupported value {alg}. Supported values are "
f"{Algorithm.PHILOX.value} for the Philox algorithm, "
f"{Algorithm.THREEFRY.value} for the ThreeFry algorithm, and "
f"{Algorithm.AUTO_SELECT.value} for auto-selection.")
return key, counter
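# Hedged sketch, not part of the original module: shows how the (key, counter)
# pair from `_get_key_counter` feeds a raw V2 kernel, mirroring what the public
# wrappers below do internally. The shape and seed values are arbitrary
# assumptions.
def _raw_v2_op_example():
  """Illustrative only; float32 uniforms from an explicit Philox key/counter."""
  seed = constant_op.constant([1, 2], dtypes.int32)
  key, counter = _get_key_counter(seed, Algorithm.PHILOX.value)
  return gen_stateless_random_ops_v2.stateless_random_uniform_v2(
      [2, 2], key=key, counter=counter, dtype=dtypes.float32,
      alg=Algorithm.PHILOX.value)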
def _get_key_counter_alg(seed, alg):
if alg is None:
alg = Algorithm.AUTO_SELECT.value
alg = convert_alg_to_int(alg)
key, counter = _get_key_counter(seed, alg)
if compat.forward_compatible(2021, 8, 11):
return key, counter, alg
else:
return key, counter, _resolve_alg(alg)
def _philox_scramble_seed(seed):
# the same scrambling procedure as core/kernels/stateless_random_ops.cc
key = constant_op.constant([0x02461e293ec8f720], dtypes.uint64)
counter = math_ops.cast(seed, dtypes.uint64)
mix = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
[4], key=key, counter=counter, dtype=dtypes.uint32,
alg=Algorithm.PHILOX.value)
key = array_ops.reshape(uint32s_to_uint64(mix[:2]), [1])
counter = array_ops.stack([0, uint32s_to_uint64(mix[2:])], axis=0)
return key, counter
def uint32s_to_uint64(x):
return bitwise_ops.bitwise_or(
math_ops.cast(x[0], dtypes.uint64),
bitwise_ops.left_shift(math_ops.cast(x[1], dtypes.uint64),
constant_op.constant(32, dtypes.uint64)))
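# Hedged sketch, not part of the original module: `uint32s_to_uint64` packs two
# uint32 halves into one uint64, low word first. The constant is an arbitrary
# assumption chosen so the result is easy to check by hand.
def _uint32s_to_uint64_example():
  """Illustrative only; packs [1, 2] into 1 + (2 << 32) == 8589934593."""
  halves = constant_op.constant([1, 2], dtype=dtypes.uint32)
  return uint32s_to_uint64(halves)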
@tf_export("random.experimental.stateless_split")
@dispatch.add_dispatch_support
def split(seed, num=2, alg="auto_select"):
"""Splits an RNG seed into `num` new seeds by adding a leading axis.
Example:
>>> seed = [1, 2]
>>> new_seeds = tf.random.experimental.stateless_split(seed, num=3)
>>> print(new_seeds)
tf.Tensor(
[[1105988140 1738052849]
[-335576002 370444179]
[ 10670227 -246211131]], shape=(3, 2), dtype=int32)
>>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :])
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.59835213, -0.9578608 ,
0.9002807 ], dtype=float32)>
Args:
seed: an RNG seed (a tensor with shape [2] and dtype `int32` or
`int64`). (When using XLA, only `int32` is allowed.)
num: optional, a positive integer or scalar tensor indicating the number of
seeds to produce (default 2).
alg: The RNG algorithm used to generate the random numbers. See
`tf.random.stateless_uniform` for a detailed explanation.
Returns:
A tensor with shape [num, 2] representing `num` new seeds. It will have the
    same dtype as `seed` (if `seed` doesn't have an explicit dtype, the dtype
will be determined by `tf.convert_to_tensor`).
"""
seed = ops.convert_to_tensor(seed)
return stateless_random_uniform(shape=[num, 2], seed=seed, dtype=seed.dtype,
minval=None, maxval=None, alg=alg)
@tf_export("random.experimental.stateless_fold_in")
@dispatch.add_dispatch_support
def fold_in(seed, data, alg="auto_select"):
"""Folds in data to an RNG seed to form a new RNG seed.
For example, in a distributed-training setting, suppose we have a master seed
and a replica ID. We want to fold the replica ID into the master seed to
form a "replica seed" to be used by that replica later on, so that different
replicas will generate different random numbers but the reproducibility of the
whole system can still be controlled by the master seed:
>>> master_seed = [1, 2]
>>> replica_id = 3
>>> replica_seed = tf.random.experimental.stateless_fold_in(
... master_seed, replica_id)
>>> print(replica_seed)
tf.Tensor([1105988140 3], shape=(2,), dtype=int32)
>>> tf.random.stateless_normal(shape=[3], seed=replica_seed)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.03197195, 0.8979765 ,
0.13253039], dtype=float32)>
Args:
seed: an RNG seed (a tensor with shape [2] and dtype `int32` or
`int64`). (When using XLA, only `int32` is allowed.)
data: an `int32` or `int64` scalar representing data to be folded in to the
seed.
alg: The RNG algorithm used to generate the random numbers. See
`tf.random.stateless_uniform` for a detailed explanation.
Returns:
A new RNG seed that is a deterministic function of the inputs and is
statistically safe for producing a stream of new pseudo-random values. It
    will have the same dtype as `data` (if `data` doesn't have an explicit dtype,
the dtype will be determined by `tf.convert_to_tensor`).
"""
data = ops.convert_to_tensor(data)
seed1 = stateless_random_uniform(shape=[], seed=seed, dtype=data.dtype,
minval=None, maxval=None, alg=alg)
return array_ops.stack([seed1, data])
@tf_export("random.stateless_uniform")
@dispatch.add_dispatch_support
def stateless_random_uniform(shape,
seed,
minval=0,
maxval=None,
dtype=dtypes.float32,
name=None,
alg="auto_select"):
"""Outputs deterministic pseudorandom values from a uniform distribution.
This is a stateless version of `tf.random.uniform`: if run twice with the
same seeds and shapes, it will produce the same pseudorandom numbers. The
output is consistent across multiple runs on the same hardware (and between
CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
For full-range (i.e. inclusive of both max and min) random integers, pass
`minval=None` and `maxval=None` with an integer `dtype`. For an integer dtype
either both `minval` and `maxval` must be `None` or neither may be `None`. For
example:
```python
ints = tf.random.stateless_uniform(
[10], seed=(2, 3), minval=None, maxval=None, dtype=tf.int32)
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
minval: A Tensor or Python value of type `dtype`, broadcastable with
`shape` (for integer types, broadcasting is not supported, so it needs to
be a scalar). The lower bound on the range of random values to
generate. Pass `None` for full-range integers. Defaults to 0.
maxval: A Tensor or Python value of type `dtype`, broadcastable with
`shape` (for integer types, broadcasting is not supported, so it needs to
be a scalar). The upper bound on the range of random values to generate.
Defaults to 1 if `dtype` is floating point. Pass `None` for full-range
integers.
dtype: The type of the output: `float16`, `bfloat16`, `float32`, `float64`,
`int32`, or `int64`. For unbounded uniform ints (`minval`, `maxval` both
`None`), `uint32` and `uint64` may be used. Defaults to `float32`.
name: A name for the operation (optional).
alg: The RNG algorithm used to generate the random numbers. Valid
choices are `"philox"` for [the Philox
algorithm](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf),
`"threefry"` for [the ThreeFry
algorithm](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf),
and `"auto_select"` (default) for the system to automatically
      select an algorithm based on the device type. Values of
`tf.random.Algorithm` can also be used. Note that with
`"auto_select"`, the outputs of this function may change when
it is running on a different device.
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and only one of `minval` or `maxval` is
specified.
"""
dtype = dtypes.as_dtype(dtype)
accepted_dtypes = (dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.int32, dtypes.int64, dtypes.uint32,
dtypes.uint64)
if dtype not in accepted_dtypes:
raise ValueError(
f"Argument `dtype` got invalid value {dtype}. Accepted dtypes are "
f"{accepted_dtypes}.")
if dtype.is_integer:
if (minval is None) != (maxval is None):
raise ValueError(
f"For integer `dtype` argument {dtype}, argument `minval` and "
f"`maxval` must be both None or not None. Got `minval`={minval} and "
f"`maxval`={maxval}.")
if minval is not None and dtype in (dtypes.uint32, dtypes.uint64):
raise ValueError(
f"Argument `dtype` got invalid value {dtype} when argument `minval` "
f"is not None. Please don't use unsigned integers in this case.")
elif maxval is None:
maxval = 1
with ops.name_scope(name, "stateless_random_uniform",
[shape, seed, minval, maxval]) as name:
shape = tensor_util.shape_tensor(shape)
if dtype.is_integer and minval is None:
key, counter, alg = _get_key_counter_alg(seed, alg)
result = (
gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
shape, key=key, counter=counter, dtype=dtype, alg=alg, name=name))
else:
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
key, counter, alg = _get_key_counter_alg(seed, alg)
result = gen_stateless_random_ops_v2.stateless_random_uniform_int_v2(
shape,
key=key,
counter=counter,
minval=minval,
maxval=maxval,
alg=alg,
name=name)
else:
key, counter, alg = _get_key_counter_alg(seed, alg)
rnd = gen_stateless_random_ops_v2.stateless_random_uniform_v2(
shape, key=key, counter=counter, dtype=dtype, alg=alg)
result = math_ops.add(rnd * (maxval - minval), minval, name=name)
tensor_util.maybe_set_static_shape(result, shape)
return result
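# Hedged sketch, not part of the original module: demonstrates the determinism
# guarantee documented above -- the same seed and shape reproduce the same
# values. The shape and seed are arbitrary assumptions.
def _stateless_uniform_determinism_example():
  """Illustrative only; the two tensors returned here are equal element-wise."""
  a = stateless_random_uniform([3], seed=[1, 2])
  b = stateless_random_uniform([3], seed=[1, 2])
  return a, b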
@tf_export("random.stateless_binomial")
@dispatch.add_dispatch_support
def stateless_random_binomial(shape,
seed,
counts,
probs,
output_dtype=dtypes.int32,
name=None):
"""Outputs deterministic pseudorandom values from a binomial distribution.
The generated values follow a binomial distribution with specified count and
probability of success parameters.
This is a stateless version of `tf.random.Generator.binomial`: if run twice
with the same seeds and shapes, it will produce the same pseudorandom numbers.
The output is consistent across multiple runs on the same hardware (and
between CPU and GPU), but may change between versions of TensorFlow or on
non-CPU/GPU hardware.
Example:
```python
counts = [10., 20.]
# Probability of success.
probs = [0.8]
binomial_samples = tf.random.stateless_binomial(
shape=[2], seed=[123, 456], counts=counts, probs=probs)
counts = ... # Shape [3, 1, 2]
probs = ... # Shape [1, 4, 2]
shape = [3, 4, 3, 4, 2]
# Sample shape will be [3, 4, 3, 4, 2]
binomial_samples = tf.random.stateless_binomial(
shape=shape, seed=[123, 456], counts=counts, probs=probs)
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
counts: Tensor. The counts of the binomial distribution. Must be
broadcastable with `probs`, and broadcastable with the rightmost
dimensions of `shape`.
probs: Tensor. The probability of success for the binomial distribution.
Must be broadcastable with `counts` and broadcastable with the rightmost
dimensions of `shape`.
output_dtype: The type of the output. Default: tf.int32
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random binomial
values. For each i, each samples[..., i] is an independent draw from
the binomial distribution on counts[i] trials with probability of
success probs[i].
"""
with ops.name_scope(name, "stateless_random_binomial",
[shape, seed, counts, probs]) as name:
shape = tensor_util.shape_tensor(shape)
probs = ops.convert_to_tensor(
probs, dtype_hint=dtypes.float32, name="probs")
counts = ops.convert_to_tensor(
counts, dtype_hint=probs.dtype, name="counts")
result = gen_stateless_random_ops.stateless_random_binomial(
shape=shape, seed=seed, counts=counts, probs=probs, dtype=output_dtype)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_gamma")
@dispatch.add_dispatch_support
def stateless_random_gamma(shape,
seed,
alpha,
beta=None,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a gamma distribution.
The generated values follow a gamma distribution with specified concentration
(`alpha`) and inverse scale (`beta`) parameters.
This is a stateless version of `tf.random.gamma`: if run twice with the same
seeds and shapes, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU and
GPU),
but may change between versions of TensorFlow or on non-CPU/GPU hardware.
A slight difference exists in the interpretation of the `shape` parameter
between `stateless_gamma` and `gamma`: in `gamma`, the `shape` is always
prepended to the shape of the broadcast of `alpha` with `beta`; whereas in
`stateless_gamma` the `shape` parameter must always encompass the shapes of
each of `alpha` and `beta` (which must broadcast together to match the
trailing dimensions of `shape`).
Note: Because internal calculations are done using `float64` and casting has
`floor` semantics, we must manually map zero outcomes to the smallest
possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This
means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise
should. This bias can only happen for small values of `alpha`, i.e.,
`alpha << 1` or large values of `beta`, i.e., `beta >> 1`.
The samples are differentiable w.r.t. alpha and beta.
The derivatives are computed using the approach described in
(Figurnov et al., 2018).
Example:
```python
samples = tf.random.stateless_gamma([10, 2], seed=[12, 34], alpha=[0.5, 1.5])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random.stateless_gamma([7, 5, 2], seed=[12, 34], alpha=[.5, 1.5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
alpha = tf.constant([[1.], [3.], [5.]])
beta = tf.constant([[3., 4.]])
samples = tf.random.stateless_gamma(
[30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta)
# samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.
with tf.GradientTape() as tape:
tape.watch([alpha, beta])
loss = tf.reduce_mean(tf.square(tf.random.stateless_gamma(
[30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta)))
dloss_dalpha, dloss_dbeta = tape.gradient(loss, [alpha, beta])
# unbiased stochastic derivatives of the loss function
alpha.shape == dloss_dalpha.shape # True
beta.shape == dloss_dbeta.shape # True
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
alpha: Tensor. The concentration parameter of the gamma distribution. Must
be broadcastable with `beta`, and broadcastable with the rightmost
dimensions of `shape`.
beta: Tensor. The inverse scale parameter of the gamma distribution. Must be
broadcastable with `alpha` and broadcastable with the rightmost dimensions
of `shape`.
dtype: Floating point dtype of `alpha`, `beta`, and the output.
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random gamma values.
    For each i, each `samples[..., i]` is an independent draw from the gamma
distribution with concentration alpha[i] and scale beta[i].
"""
with ops.name_scope(name, "stateless_random_gamma",
[shape, seed, alpha, beta]) as name:
shape = tensor_util.shape_tensor(shape)
alpha = ops.convert_to_tensor(alpha, dtype=dtype, name="alpha")
beta = ops.convert_to_tensor(
beta if beta is not None else 1, name="beta", dtype=dtype)
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(alpha), array_ops.shape(beta))
alpha_broadcast = array_ops.broadcast_to(alpha, broadcast_shape)
result = math_ops.maximum(
np.finfo(alpha.dtype.as_numpy_dtype).tiny,
gen_stateless_random_ops.stateless_random_gamma_v2(
shape, seed=seed, alpha=alpha_broadcast) / beta)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_poisson")
@dispatch.add_dispatch_support
def stateless_random_poisson(shape,
seed,
lam,
dtype=dtypes.int32,
name=None):
"""Outputs deterministic pseudorandom values from a Poisson distribution.
The generated values follow a Poisson distribution with specified rate
parameter.
This is a stateless version of `tf.random.poisson`: if run twice with the same
seeds and shapes, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware, but may change between
versions of TensorFlow or on non-CPU/GPU hardware.
A slight difference exists in the interpretation of the `shape` parameter
between `stateless_poisson` and `poisson`: in `poisson`, the `shape` is always
prepended to the shape of `lam`; whereas in `stateless_poisson` the shape of
`lam` must match the trailing dimensions of `shape`.
Example:
```python
samples = tf.random.stateless_poisson([10, 2], seed=[12, 34], lam=[5, 15])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random.stateless_poisson([7, 5, 2], seed=[12, 34], lam=[5, 15])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
rate = tf.constant([[1.], [3.], [5.]])
samples = tf.random.stateless_poisson([30, 3, 1], seed=[12, 34], lam=rate)
# samples has shape [30, 3, 1], with 30 samples each of 3x1 distributions.
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
lam: Tensor. The rate parameter "lambda" of the Poisson distribution. Shape
must match the rightmost dimensions of `shape`.
dtype: Dtype of the samples (int or float dtypes are permissible, as samples
are discrete). Default: int32.
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random Poisson values.
For each i, each `samples[..., i]` is an independent draw from the Poisson
distribution with rate `lam[i]`.
"""
with ops.name_scope(name, "stateless_random_poisson",
[shape, seed, lam]) as name:
shape = tensor_util.shape_tensor(shape)
result = gen_stateless_random_ops.stateless_random_poisson(
shape, seed=seed, lam=lam, dtype=dtype)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_normal")
@dispatch.add_dispatch_support
def stateless_random_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None,
alg="auto_select"):
"""Outputs deterministic pseudorandom values from a normal distribution.
This is a stateless version of `tf.random.normal`: if run twice with the
same seeds and shapes, it will produce the same pseudorandom numbers. The
output is consistent across multiple runs on the same hardware (and between
CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The float type of the output: `float16`, `bfloat16`, `float32`,
`float64`. Defaults to `float32`.
name: A name for the operation (optional).
alg: The RNG algorithm used to generate the random numbers. See
`tf.random.stateless_uniform` for a detailed explanation.
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateless_random_normal",
[shape, seed, mean, stddev]) as name:
shape = tensor_util.shape_tensor(shape)
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
key, counter, alg = _get_key_counter_alg(seed, alg)
rnd = gen_stateless_random_ops_v2.stateless_random_normal_v2(
shape, key=key, counter=counter, dtype=dtype, alg=alg)
result = math_ops.add(rnd * stddev, mean, name=name)
tensor_util.maybe_set_static_shape(result, shape)
return result
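# Hedged sketch, not part of the original module: a minimal call to
# `stateless_random_normal`, in the spirit of the doctests on `split` and
# `fold_in` above. Shape, seed, mean and stddev are arbitrary assumptions.
def _stateless_normal_example():
  """Illustrative only; deterministic draws from N(mean=10, stddev=2)."""
  return stateless_random_normal(shape=[2, 3], seed=[4, 2], mean=10.0, stddev=2.0)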
@tf_export("random.stateless_truncated_normal")
@dispatch.add_dispatch_support
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None,
alg="auto_select"):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.random.truncated_normal`: if run twice with
the same seeds and shapes, it will produce the same pseudorandom numbers. The
output is consistent across multiple runs on the same hardware (and between
CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
alg: The RNG algorithm used to generate the random numbers. See
`tf.random.stateless_uniform` for a detailed explanation.
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_truncated_normal",
[shape, seed, mean, stddev]) as name:
shape = tensor_util.shape_tensor(shape)
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
key, counter, alg = _get_key_counter_alg(seed, alg)
rnd = gen_stateless_random_ops_v2.stateless_truncated_normal_v2(
shape, key=key, counter=counter, dtype=dtype, alg=alg)
result = math_ops.add(rnd * stddev, mean, name=name)
tensor_util.maybe_set_static_shape(result, shape)
return result
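# Hedged sketch, not part of the original module: per the docstring above,
# every value produced here lies within two standard deviations of the mean.
# Shape and seed are arbitrary assumptions.
def _stateless_truncated_normal_example():
  """Illustrative only; all values fall in [mean - 2*stddev, mean + 2*stddev]."""
  return stateless_truncated_normal(shape=[4], seed=[7, 11], mean=0.0, stddev=1.0)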
@tf_export(v1=["random.stateless_multinomial"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
date=None, instructions="Use `tf.random.stateless_categorical` instead.")
def stateless_multinomial(logits,
num_samples,
seed,
output_dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a multinomial distribution.
This is a stateless version of `tf.random.categorical`: if run twice with the
same seeds and shapes, it will produce the same pseudorandom numbers. The
output is consistent across multiple runs on the same hardware (and between
CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_categorical(
tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
output_dtype: The integer type of the output: `int32` or `int64`. Defaults
to `int64`.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_multinomial", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples,
output_dtype, seed)
@tf_export("random.stateless_categorical")
@dispatch.add_dispatch_support
def stateless_categorical(logits,
num_samples,
seed,
dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a categorical distribution.
  This is a stateless version of `tf.random.categorical`: if run twice with the
same seeds and shapes, it will produce the same pseudorandom numbers. The
output is consistent across multiple runs on the same hardware (and between
CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_categorical(
tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
dtype: The integer type of the output: `int32` or `int64`. Defaults to
`int64`.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_categorical", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples, dtype,
seed)
def stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed):
"""Implementation for stateless multinomial/categorical ops (v1/v2)."""
logits = ops.convert_to_tensor(logits, name="logits")
dtype = dtypes.as_dtype(dtype) if dtype else dtypes.int64
accepted_dtypes = (dtypes.int32, dtypes.int64)
if dtype not in accepted_dtypes:
raise ValueError(
f"Argument `dtype` got invalid value {dtype}. Accepted dtypes are "
f"{accepted_dtypes}.")
return gen_stateless_random_ops.stateless_multinomial(
logits, num_samples, seed, output_dtype=dtype)
@dispatch.add_dispatch_support
@tf_export("random.stateless_parameterized_truncated_normal")
def stateless_parameterized_truncated_normal(shape,
seed,
means=0.0,
stddevs=1.0,
minvals=-2.0,
maxvals=2.0,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Examples:
  Sample from a truncated normal, with differing shape parameters that
broadcast.
>>> means = 0.
>>> stddevs = tf.math.exp(tf.random.uniform(shape=[2, 3]))
>>> minvals = [-1., -2., -1000.]
>>> maxvals = [[10000.], [1.]]
>>> y = tf.random.stateless_parameterized_truncated_normal(
... shape=[10, 2, 3], seed=[7, 17],
... means=means, stddevs=stddevs, minvals=minvals, maxvals=maxvals)
>>> y.shape
TensorShape([10, 2, 3])
Args:
shape: A 1-D integer `Tensor` or Python array. The shape of the output
tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
means: A `Tensor` or Python value of type `dtype`. The mean of the truncated
normal distribution. This must broadcast with `stddevs`, `minvals` and
`maxvals`, and the broadcasted shape must be dominated by `shape`.
stddevs: A `Tensor` or Python value of type `dtype`. The standard deviation
of the truncated normal distribution. This must broadcast with `means`,
`minvals` and `maxvals`, and the broadcasted shape must be dominated by
`shape`.
minvals: A `Tensor` or Python value of type `dtype`. The minimum value of
the truncated normal distribution. This must broadcast with `means`,
`stddevs` and `maxvals`, and the broadcasted shape must be dominated by
`shape`.
maxvals: A `Tensor` or Python value of type `dtype`. The maximum value of
the truncated normal distribution. This must broadcast with `means`,
`stddevs` and `minvals`, and the broadcasted shape must be dominated by
`shape`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_parameterized_truncated_normal",
[shape, means, stddevs, minvals, maxvals]) as name:
shape_tensor = tensor_util.shape_tensor(shape)
means_tensor = ops.convert_to_tensor(means, name="means")
stddevs_tensor = ops.convert_to_tensor(stddevs, name="stddevs")
minvals_tensor = ops.convert_to_tensor(minvals, name="minvals")
maxvals_tensor = ops.convert_to_tensor(maxvals, name="maxvals")
rnd = gen_stateless_random_ops.stateless_parameterized_truncated_normal(
shape_tensor, seed, means_tensor, stddevs_tensor, minvals_tensor,
maxvals_tensor)
tensor_util.maybe_set_static_shape(rnd, shape)
return rnd
| 43.399538 | 85 | 0.685026 |
0f664171cf342894cde5116235673b018f8c2ae4 | 1,757 | py | Python | Feature_Learning/learning_model/utils/reid_metric.py | Simon4Yan/feature_learning | 23ecc1bb2ce3ce4bece9159ca4ecc420e3e8f34c | ["MIT"] | 17 | 2019-05-31T12:58:42.000Z | 2021-05-26T09:47:50.000Z | Feature_Learning/learning_model/utils/reid_metric.py | GangmingZhao/feature_learning | 23ecc1bb2ce3ce4bece9159ca4ecc420e3e8f34c | ["MIT"] | 2 | 2019-11-13T14:29:23.000Z | 2022-02-06T07:13:28.000Z | Feature_Learning/learning_model/utils/reid_metric.py | GangmingZhao/feature_learning | 23ecc1bb2ce3ce4bece9159ca4ecc420e3e8f34c | ["MIT"] | 3 | 2019-05-31T15:40:24.000Z | 2020-11-09T07:44:46.000Z |
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
import numpy as np
import torch
from ignite.metrics import Metric
from data.datasets.eval_reid import eval_func
from data.datasets.ranking import cmc
from data.datasets.ranking import mean_ap
import scipy.io
class R1_mAP(Metric):
def __init__(self, num_query, max_rank=50):
super(R1_mAP, self).__init__()
self.num_query = num_query
self.max_rank = max_rank
def reset(self):
self.feats = []
self.pids = []
self.camids = []
def update(self, output):
feat, pid, camid = output
self.feats.append(feat)
self.pids.extend(np.asarray(pid))
self.camids.extend(np.asarray(camid))
def compute(self):
feats = torch.cat(self.feats, dim=0)
# query
qf = feats[:self.num_query]
q_pids = np.asarray(self.pids[:self.num_query])
q_camids = np.asarray(self.camids[:self.num_query])
# gallery
gf = feats[self.num_query:]
g_pids = np.asarray(self.pids[self.num_query:])
g_camids = np.asarray(self.camids[self.num_query:])
m, n = qf.shape[0], gf.shape[0]
        # Pairwise squared Euclidean distances via the expansion
        # ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form of the deprecated positional call addmm_(1, -2, qf, gf.t())
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
distmat = distmat.cpu().numpy()
#dis_save = {'dis': distmat}
#scipy.io.savemat('distmat_ori.mat', dis_save)
cmc1, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
#cmc0 = cmc(distmat, q_pids, g_pids, q_camids, g_camids)
#mAP = mean_ap(distmat, q_pids, g_pids, q_camids, g_camids)
return cmc1, mAP
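# Hedged usage sketch, not part of the original file: wiring R1_mAP into an
# ignite evaluator. `model`, `val_loader` and `num_query` are supplied by the
# caller, and the (images, pids, camids) batch layout is an assumption about
# the caller's data loader, not something defined in this module.
def _example_run_evaluation(model, val_loader, num_query, device='cuda'):
    from ignite.engine import Engine

    def _inference(engine, batch):
        # Return exactly the (feat, pid, camid) 3-tuple that R1_mAP.update() unpacks.
        model.eval()
        with torch.no_grad():
            data, pids, camids = batch
            feat = model(data.to(device))
            return feat, pids, camids

    evaluator = Engine(_inference)
    R1_mAP(num_query, max_rank=50).attach(evaluator, 'r1_mAP')
    evaluator.run(val_loader)
    cmc_scores, mAP = evaluator.state.metrics['r1_mAP']
    return cmc_scores, mAP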
| 32.537037 | 76 | 0.620945 |
e6fa282860e4b8f0ecce9ae4ec6ff2784a2317ef | 921 | py | Python | conftest.py | konflic/python_qa_remotes | d36ddfb023b87a5439a78938c0f6842212a11e3d | ["MIT"] | null | null | null | conftest.py | konflic/python_qa_remotes | d36ddfb023b87a5439a78938c0f6842212a11e3d | ["MIT"] | 1 | 2021-12-05T20:39:16.000Z | 2021-12-05T20:39:16.000Z | conftest.py | konflic/python_qa_remotes | d36ddfb023b87a5439a78938c0f6842212a11e3d | ["MIT"] | 1 | 2021-12-09T13:44:13.000Z | 2021-12-09T13:44:13.000Z |
import pytest
from selenium import webdriver
def pytest_addoption(parser):
parser.addoption(
"--browser",
default="chrome",
choices=["chrome", "firefox", "opera", "MicrosoftEdge"]
)
parser.addoption("--executor", default="192.168.1.88")
@pytest.fixture
def firefox(request):
wd = webdriver.Firefox()
request.addfinalizer(wd.quit)
return wd
@pytest.fixture
def chrome(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
@pytest.fixture
def remote(request):
browser = request.config.getoption("--browser")
executor = request.config.getoption("--executor")
driver = webdriver.Remote(
command_executor=f"http://{executor}:4444/wd/hub",
desired_capabilities={"browserName": browser}
)
driver.implicitly_wait(2)
driver.maximize_window()
request.addfinalizer(driver.quit)
return driver
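# Hedged usage sketch, not part of the original conftest: a test module simply
# requests one of the fixtures above, and the grid address / browser are picked
# on the command line. "selenoid.local" and the test body are illustrative
# assumptions.
#
#   # test_example.py
#   def test_opens_page(remote):
#       remote.get("https://example.com")
#       assert "Example" in remote.title
#
# Run against the remote grid with, for instance:
#   pytest --browser=firefox --executor=selenoid.local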
| 20.931818 | 63 | 0.673181 |
3b1913e2fd6208932f0574a858406fe08f305b85 | 72,040 | py | Python | corehq/apps/domain/views/accounting.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | ["BSD-3-Clause"] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/domain/views/accounting.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | ["BSD-3-Clause"] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/domain/views/accounting.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | ["BSD-3-Clause"] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z |
import datetime
import json
from collections import namedtuple
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.paginator import Paginator
from django.core.validators import validate_email
from django.db import transaction
from django.db.models import Sum
from django.http import (
Http404,
HttpResponse,
HttpResponseRedirect,
HttpResponseForbidden,
)
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.decorators.http import require_POST
from django.views.generic import View
import dateutil
from couchdbkit import ResourceNotFound
from django_prbac.utils import has_privilege
from memoized import memoized
from corehq.apps.accounting.decorators import always_allow_project_access
from corehq.apps.accounting.utils.downgrade import can_domain_unpause
from dimagi.utils.web import json_response
from corehq import privileges
from corehq.apps.accounting.async_handlers import Select2BillingInfoHandler
from corehq.apps.accounting.exceptions import (
NewSubscriptionError,
PaymentRequestError,
SubscriptionAdjustmentError,
SubscriptionRenewalError,
)
from corehq.apps.accounting.forms import (
AnnualPlanContactForm,
EnterprisePlanContactForm,
)
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.models import (
MINIMUM_SUBSCRIPTION_LENGTH,
UNLIMITED_FEATURE_USAGE,
BillingAccount,
BillingAccountType,
BillingRecord,
CreditLine,
CustomerInvoice,
DefaultProductPlan,
EntryPoint,
Invoice,
InvoicePdf,
LastPayment,
PaymentMethodType,
SoftwarePlanEdition,
StripePaymentMethod,
Subscription,
SubscriptionType,
WireInvoice,
)
from corehq.apps.accounting.payment_handlers import (
BulkStripePaymentHandler,
CreditStripePaymentHandler,
InvoiceStripePaymentHandler,
)
from corehq.apps.accounting.subscription_changes import (
DomainDowngradeStatusHandler,
)
from corehq.apps.accounting.usage import FeatureUsageCalculator
from corehq.apps.accounting.user_text import (
DESC_BY_EDITION,
get_feature_name,
get_feature_recurring_interval,
)
from corehq.apps.accounting.utils import (
fmt_dollar_amount,
get_change_status,
get_customer_cards,
get_privileges,
is_downgrade,
log_accounting_error,
quantize_accounting_decimal,
get_paused_plan_context,
pause_current_subscription,
)
from corehq.apps.domain.decorators import (
login_and_domain_required,
require_superuser,
LoginAndDomainMixin,
)
from corehq.apps.domain.forms import (
INTERNAL_SUBSCRIPTION_MANAGEMENT_FORMS,
AdvancedExtendedTrialForm,
ConfirmNewSubscriptionForm,
ConfirmSubscriptionRenewalForm,
ContractedPartnerForm,
DimagiOnlyEnterpriseForm,
EditBillingAccountInfoForm,
SelectSubscriptionTypeForm,
)
from corehq.apps.domain.views.base import DomainViewMixin
from corehq.apps.domain.views.settings import (
BaseAdminProjectSettingsView,
BaseProjectSettingsView,
)
from corehq.apps.hqwebapp.async_handler import AsyncHandlerMixin
from corehq.apps.hqwebapp.decorators import use_jquery_ui
from corehq.apps.hqwebapp.tasks import send_mail_async
from corehq.apps.hqwebapp.views import BasePageView, CRUDPaginatedViewMixin
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
from corehq.const import USER_DATE_FORMAT
PAYMENT_ERROR_MESSAGES = {
400: ugettext_lazy('Your request was not formatted properly.'),
403: ugettext_lazy('Forbidden.'),
404: ugettext_lazy('Page not found.'),
500: ugettext_lazy("There was an error processing your request."
" We're working quickly to fix the issue. Please try again shortly."),
}
class SubscriptionUpgradeRequiredView(LoginAndDomainMixin, BasePageView, DomainViewMixin):
page_title = ugettext_lazy("Upgrade Required")
template_name = "domain/insufficient_privilege_notification.html"
@property
def page_url(self):
return self.request.get_full_path
@property
def page_name(self):
return _("Sorry, you do not have access to %(feature_name)s") % {
'feature_name': self.feature_name,
}
@property
def is_domain_admin(self):
if not hasattr(self.request, 'couch_user'):
return False
return self.request.couch_user.is_domain_admin(self.domain)
@property
def page_context(self):
context = {
'domain': self.domain,
'feature_name': self.feature_name,
'plan_name': self.required_plan_name,
'change_subscription_url': reverse(SelectPlanView.urlname,
args=[self.domain]),
'is_domain_admin': self.is_domain_admin,
}
context.update(get_paused_plan_context(self.request, self.domain))
return context
@property
def missing_privilege(self):
return self.args[1]
@property
def feature_name(self):
return privileges.Titles.get_name_from_privilege(self.missing_privilege)
@property
def required_plan_name(self):
return DefaultProductPlan.get_lowest_edition([self.missing_privilege])
def get(self, request, *args, **kwargs):
self.request = request
self.args = args
return super(SubscriptionUpgradeRequiredView, self).get(
request, *args, **kwargs
)
class DomainAccountingSettings(BaseProjectSettingsView):
@method_decorator(always_allow_project_access)
@method_decorator(require_permission(Permissions.edit_billing))
def dispatch(self, request, *args, **kwargs):
return super(DomainAccountingSettings, self).dispatch(request, *args, **kwargs)
@property
@memoized
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
@property
def current_subscription(self):
return Subscription.get_active_subscription_by_domain(self.domain)
@property
def main_context(self):
context = super(DomainAccountingSettings, self).main_context
context['show_prepaid_modal'] = False
return context
class DomainSubscriptionView(DomainAccountingSettings):
urlname = 'domain_subscription_view'
template_name = 'domain/current_subscription.html'
page_title = ugettext_lazy("Current Subscription")
@property
def can_purchase_credits(self):
return self.request.couch_user.can_edit_billing()
@property
@memoized
def plan(self):
subscription = Subscription.get_active_subscription_by_domain(self.domain)
plan_version = subscription.plan_version if subscription else DefaultProductPlan.get_default_plan_version()
date_end = None
next_subscription = {
'exists': False,
'can_renew': False,
'name': None,
'price': None,
}
cards = None
trial_length = None
if subscription:
cards = get_customer_cards(self.request.user.username, self.domain)
date_end = (subscription.date_end.strftime(USER_DATE_FORMAT)
if subscription.date_end is not None else "--")
if subscription.date_end is not None:
if subscription.is_renewed:
next_subscription.update({
'exists': True,
'is_paused': subscription.next_subscription.plan_version.is_paused,
'date_start': subscription.next_subscription.date_start.strftime(USER_DATE_FORMAT),
'name': subscription.next_subscription.plan_version.plan.name,
'price': (
_("USD %s /month")
% subscription.next_subscription.plan_version.product_rate.monthly_fee
),
})
else:
days_left = (subscription.date_end - datetime.date.today()).days
next_subscription.update({
'can_renew': days_left <= 30,
'renew_url': reverse(SubscriptionRenewalView.urlname, args=[self.domain]),
})
if subscription.is_trial and subscription.date_end is not None:
trial_length = (subscription.date_end - subscription.date_start).days
if subscription:
credit_lines = CreditLine.get_non_general_credits_by_subscription(subscription)
credit_lines = [cl for cl in credit_lines if cl.balance > 0]
has_credits_in_non_general_credit_line = len(credit_lines) > 0
else:
has_credits_in_non_general_credit_line = False
info = {
'products': [self.get_product_summary(plan_version, self.account, subscription)],
'features': self.get_feature_summary(plan_version, self.account, subscription),
'general_credit': self._fmt_credit(self._credit_grand_total(
CreditLine.get_credits_by_subscription_and_features(
subscription
) if subscription else None
)),
'account_general_credit': self._fmt_credit(self._credit_grand_total(
CreditLine.get_credits_for_account(
self.account
) if self.account else None
)),
'css_class': "label-plan label-plan-%s" % plan_version.plan.edition.lower(),
'do_not_invoice': subscription.do_not_invoice if subscription is not None else False,
'is_trial': subscription.is_trial if subscription is not None else False,
'trial_length': trial_length,
'date_start': (subscription.date_start.strftime(USER_DATE_FORMAT)
if subscription is not None else None),
'date_end': date_end,
'cards': cards,
'next_subscription': next_subscription,
'has_credits_in_non_general_credit_line': has_credits_in_non_general_credit_line,
'is_annual_plan': plan_version.plan.is_annual_plan,
'is_paused': subscription.plan_version.is_paused,
'previous_subscription_edition': (
subscription.previous_subscription.plan_version.plan.edition
if subscription.previous_subscription else ""
),
}
info['has_account_level_credit'] = (
any(
product_info['account_credit'] and product_info['account_credit']['is_visible']
for product_info in info['products']
)
or info['account_general_credit'] and info['account_general_credit']['is_visible']
)
info.update(plan_version.user_facing_description)
return info
def _fmt_credit(self, credit_amount=None):
if credit_amount is None:
return {
'amount': "--",
}
return {
'amount': fmt_dollar_amount(credit_amount),
'is_visible': credit_amount != Decimal('0.0'),
}
def _credit_grand_total(self, credit_lines):
return sum([c.balance for c in credit_lines]) if credit_lines else Decimal('0.00')
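    # Hedged illustration, not part of the original view: with credit lines whose
    # balances are Decimal('5.00') and Decimal('0.00'), _credit_grand_total()
    # returns Decimal('5.00'), and _fmt_credit() renders it as
    # {'amount': fmt_dollar_amount(Decimal('5.00')), 'is_visible': True};
    # a zero grand total keeps 'is_visible' False so the template hides it.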
def get_product_summary(self, plan_version, account, subscription):
product_rate = plan_version.product_rate
return {
'monthly_fee': _("USD %s /month") % product_rate.monthly_fee,
'subscription_credit': self._fmt_credit(self._credit_grand_total(
CreditLine.get_credits_by_subscription_and_features(
subscription, is_product=True
) if subscription else None
)),
'account_credit': self._fmt_credit(self._credit_grand_total(
CreditLine.get_credits_for_account(
account, is_product=True
) if account else None
)),
}
def get_feature_summary(self, plan_version, account, subscription):
def _get_feature_info(feature_rate):
usage = FeatureUsageCalculator(feature_rate, self.domain).get_usage()
feature_type = feature_rate.feature.feature_type
if feature_rate.monthly_limit == UNLIMITED_FEATURE_USAGE:
remaining = limit = _('Unlimited')
else:
limit = feature_rate.monthly_limit
remaining = limit - usage
if remaining < 0:
remaining = _("%d over limit") % (-1 * remaining)
return {
'name': get_feature_name(feature_type),
'usage': usage,
'limit': limit,
'remaining': remaining,
'type': feature_type,
'recurring_interval': get_feature_recurring_interval(feature_type),
'subscription_credit': self._fmt_credit(self._credit_grand_total(
CreditLine.get_credits_by_subscription_and_features(
subscription, feature_type=feature_type
) if subscription else None
)),
'account_credit': self._fmt_credit(self._credit_grand_total(
CreditLine.get_credits_for_account(
account, feature_type=feature_type
) if account else None
)),
}
return list(map(_get_feature_info, plan_version.feature_rates.all()))
@property
def page_context(self):
from corehq.apps.domain.views.sms import SMSRatesView
subs = self.current_subscription
return {
'plan': self.plan,
'change_plan_url': reverse(SelectPlanView.urlname, args=[self.domain]),
'can_purchase_credits': self.can_purchase_credits,
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'payment_error_messages': PAYMENT_ERROR_MESSAGES,
'sms_rate_calc_url': reverse(SMSRatesView.urlname,
args=[self.domain]),
'user_email': self.request.couch_user.username,
'show_account_credits': any(
feature['account_credit'].get('is_visible')
for feature in self.plan.get('features')
),
'can_change_subscription': subs and subs.user_can_change_subscription(self.request.user),
}
class EditExistingBillingAccountView(DomainAccountingSettings, AsyncHandlerMixin):
template_name = 'domain/update_billing_contact_info.html'
urlname = 'domain_update_billing_info'
page_title = ugettext_lazy("Billing Information")
async_handlers = [
Select2BillingInfoHandler,
]
@property
@memoized
def billing_info_form(self):
is_ops_user = has_privilege(self.request, privileges.ACCOUNTING_ADMIN)
if self.request.method == 'POST':
return EditBillingAccountInfoForm(
self.account, self.domain, self.request.couch_user.username, data=self.request.POST,
is_ops_user=is_ops_user
)
return EditBillingAccountInfoForm(self.account, self.domain, self.request.couch_user.username,
is_ops_user=is_ops_user)
def dispatch(self, request, *args, **kwargs):
if self.account is None:
raise Http404()
return super(EditExistingBillingAccountView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
return {
'billing_account_info_form': self.billing_info_form,
'cards': self._get_cards(),
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
}
def _get_cards(self):
if not settings.STRIPE_PRIVATE_KEY:
return []
user = self.request.user.username
payment_method, new_payment_method = StripePaymentMethod.objects.get_or_create(
web_user=user,
method_type=PaymentMethodType.STRIPE,
)
return payment_method.all_cards_serialized(self.account)
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
if self.billing_info_form.is_valid():
is_saved = self.billing_info_form.save()
if not is_saved:
messages.error(
request, _("It appears that there was an issue updating your contact information. "
"We've been notified of the issue. Please try submitting again, and if the problem "
"persists, please try in a few hours."))
else:
messages.success(
request, _("Billing contact information was successfully updated.")
)
return HttpResponseRedirect(reverse(EditExistingBillingAccountView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class DomainBillingStatementsView(DomainAccountingSettings, CRUDPaginatedViewMixin):
template_name = 'domain/billing_statements.html'
urlname = 'domain_billing_statements'
page_title = ugettext_lazy("Billing Statements")
limit_text = ugettext_lazy("statements per page")
empty_notification = ugettext_lazy("No Billing Statements match the current criteria.")
loading_message = ugettext_lazy("Loading statements...")
@property
def stripe_cards(self):
return get_customer_cards(self.request.user.username, self.domain)
@property
def show_hidden(self):
if not self.request.user.is_superuser:
return False
return bool(self.request.POST.get('additionalData[show_hidden]'))
@property
def show_unpaid(self):
try:
return json.loads(self.request.POST.get('additionalData[show_unpaid]'))
except TypeError:
return False
@property
def invoices(self):
invoices = Invoice.objects.filter(subscription__subscriber__domain=self.domain)
if not self.show_hidden:
invoices = invoices.filter(is_hidden=False)
if self.show_unpaid:
invoices = invoices.filter(date_paid__exact=None)
return invoices.order_by('-date_start', '-date_end')
@property
def total(self):
return self.paginated_invoices.count
@property
@memoized
def paginated_invoices(self):
return Paginator(self.invoices, self.limit)
@property
def total_balance(self):
"""
Returns the total balance of unpaid, unhidden invoices.
Doesn't take into account the view settings on the page.
"""
invoices = (Invoice.objects
.filter(subscription__subscriber__domain=self.domain)
.filter(date_paid__exact=None)
.filter(is_hidden=False))
return invoices.aggregate(
total_balance=Sum('balance')
).get('total_balance') or 0.00
@property
def column_names(self):
return [
_("Statement No."),
_("Plan"),
_("Billing Period"),
_("Date Due"),
_("Payment Status"),
_("PDF"),
]
@property
def page_context(self):
pagination_context = self.pagination_context
pagination_context.update({
'stripe_options': {
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'stripe_cards': self.stripe_cards,
},
'payment_error_messages': PAYMENT_ERROR_MESSAGES,
'payment_urls': {
'process_invoice_payment_url': reverse(
InvoiceStripePaymentView.urlname,
args=[self.domain],
),
'process_bulk_payment_url': reverse(
BulkStripePaymentView.urlname,
args=[self.domain],
),
'process_wire_invoice_url': reverse(
WireInvoiceView.urlname,
args=[self.domain],
),
},
'total_balance': self.total_balance,
'show_plan': True,
'show_overdue_invoice_modal': False,
})
return pagination_context
@property
def can_pay_invoices(self):
return self.request.couch_user.is_domain_admin(self.domain)
@property
def paginated_list(self):
for invoice in self.paginated_invoices.page(self.page).object_list:
try:
last_billing_record = BillingRecord.objects.filter(
invoice=invoice
).latest('date_created')
if invoice.is_paid:
payment_status = (_("Paid on %s.")
% invoice.date_paid.strftime(USER_DATE_FORMAT))
payment_class = "label label-default"
else:
payment_status = _("Not Paid")
payment_class = "label label-danger"
date_due = (
(invoice.date_due.strftime(USER_DATE_FORMAT)
if not invoice.is_paid else _("Already Paid"))
if invoice.date_due else _("None")
)
yield {
'itemData': {
'id': invoice.id,
'invoice_number': invoice.invoice_number,
'start': invoice.date_start.strftime(USER_DATE_FORMAT),
'end': invoice.date_end.strftime(USER_DATE_FORMAT),
'plan': invoice.subscription.plan_version.user_facing_description,
'payment_status': payment_status,
'payment_class': payment_class,
'date_due': date_due,
'pdfUrl': reverse(
BillingStatementPdfView.urlname,
args=[self.domain, last_billing_record.pdf_data_id]
),
'canMakePayment': (not invoice.is_paid
and self.can_pay_invoices),
'balance': "%s" % quantize_accounting_decimal(invoice.balance),
},
'template': 'statement-row-template',
}
except BillingRecord.DoesNotExist:
log_accounting_error(
"An invoice was generated for %(invoice_id)d "
"(domain: %(domain)s), but no billing record!" % {
'invoice_id': invoice.id,
'domain': self.domain,
},
show_stack_trace=True
)
def refresh_item(self, item_id):
pass
def post(self, *args, **kwargs):
return self.paginate_crud_response
def dispatch(self, request, *args, **kwargs):
if self.account is None:
raise Http404()
return super(DomainBillingStatementsView, self).dispatch(request, *args, **kwargs)
class BaseStripePaymentView(DomainAccountingSettings):
http_method_names = ['post']
@property
def account(self):
raise NotImplementedError("you must implement the property account")
@property
@memoized
def billing_admin(self):
if self.request.couch_user.can_edit_billing():
return self.request.couch_user.username
else:
raise PaymentRequestError(
"The logged in user was not a billing admin."
)
def get_or_create_payment_method(self):
return StripePaymentMethod.objects.get_or_create(
web_user=self.billing_admin,
method_type=PaymentMethodType.STRIPE,
)[0]
def get_payment_handler(self):
"""Returns a StripePaymentHandler object
"""
raise NotImplementedError("You must implement get_payment_handler()")
def post(self, request, *args, **kwargs):
try:
payment_handler = self.get_payment_handler()
response = payment_handler.process_request(request)
except PaymentRequestError as e:
log_accounting_error(
"Failed to process Stripe Payment due to bad "
"request for domain %(domain)s user %(web_user)s: "
"%(error)s" % {
'domain': self.domain,
'web_user': self.request.user.username,
'error': e,
}
)
response = {
'error': {
'message': _(
"There was an issue processing your payment. No "
"charges were made. We're looking into the issue "
"as quickly as possible. Sorry for the inconvenience."
)
}
}
return json_response(response)
class CreditsStripePaymentView(BaseStripePaymentView):
urlname = 'domain_credits_payment'
@property
@memoized
def account(self):
return BillingAccount.get_or_create_account_by_domain(
self.domain,
created_by=self.request.user.username,
account_type=BillingAccountType.USER_CREATED,
entry_point=EntryPoint.SELF_STARTED,
last_payment_method=LastPayment.CC_ONE_TIME,
)[0]
def get_payment_handler(self):
return CreditStripePaymentHandler(
self.get_or_create_payment_method(),
self.domain,
self.account,
subscription=Subscription.get_active_subscription_by_domain(self.domain),
post_data=self.request.POST.copy(),
)
class CreditsWireInvoiceView(DomainAccountingSettings):
http_method_names = ['post']
urlname = 'domain_wire_payment'
@method_decorator(login_and_domain_required)
def dispatch(self, request, *args, **kwargs):
return super(CreditsWireInvoiceView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
        emails = request.POST.get('emails', '').split()
invalid_emails = []
for email in emails:
try:
validate_email(email)
except ValidationError:
invalid_emails.append(email)
if invalid_emails:
message = (_('The following e-mail addresses contain invalid characters, or are missing required '
'characters: ') + ', '.join(['"{}"'.format(email) for email in invalid_emails]))
return json_response({'error': {'message': message}})
amount = Decimal(request.POST.get('amount', 0))
general_credit = Decimal(request.POST.get('general_credit', 0))
wire_invoice_factory = DomainWireInvoiceFactory(request.domain, contact_emails=emails)
try:
wire_invoice_factory.create_wire_credits_invoice(amount, general_credit)
except Exception as e:
return json_response({'error': {'message': str(e)}})
return json_response({'success': True})
class InvoiceStripePaymentView(BaseStripePaymentView):
urlname = 'domain_invoice_payment'
@property
@memoized
def invoice(self):
try:
invoice_id = self.request.POST['invoice_id']
        except KeyError:
raise PaymentRequestError("invoice_id is required")
try:
if self.account and self.account.is_customer_billing_account:
return CustomerInvoice.objects.get(pk=invoice_id)
else:
return Invoice.objects.get(pk=invoice_id)
except (Invoice.DoesNotExist, CustomerInvoice.DoesNotExist):
raise PaymentRequestError(
"Could not find a matching invoice for invoice_id '%s'"
% invoice_id
)
@property
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
def get_payment_handler(self):
return InvoiceStripePaymentHandler(
self.get_or_create_payment_method(), self.domain, self.invoice
)
class BulkStripePaymentView(BaseStripePaymentView):
urlname = 'domain_bulk_payment'
@property
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
def get_payment_handler(self):
return BulkStripePaymentHandler(
self.get_or_create_payment_method(), self.domain
)
class WireInvoiceView(View):
http_method_names = ['post']
urlname = 'domain_wire_invoice'
@method_decorator(always_allow_project_access)
@method_decorator(require_permission(Permissions.edit_billing))
def dispatch(self, request, *args, **kwargs):
return super(WireInvoiceView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
emails = request.POST.get('emails', []).split()
balance = Decimal(request.POST.get('customPaymentAmount', 0))
from corehq.apps.accounting.utils.account import (
get_account_or_404,
request_has_permissions_for_enterprise_admin,
)
account = get_account_or_404(request.domain)
if (account.is_customer_billing_account
and not request_has_permissions_for_enterprise_admin(request, account)):
return HttpResponseForbidden()
wire_invoice_factory = DomainWireInvoiceFactory(request.domain, contact_emails=emails, account=account)
try:
wire_invoice_factory.create_wire_invoice(balance)
except Exception as e:
            return json_response({'error': {'message': str(e)}})
return json_response({'success': True})
class BillingStatementPdfView(View):
urlname = 'domain_billing_statement_download'
@method_decorator(always_allow_project_access)
@method_decorator(require_permission(Permissions.edit_billing))
def dispatch(self, request, *args, **kwargs):
return super(BillingStatementPdfView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
domain = args[0]
statement_id = kwargs.get('statement_id')
if statement_id is None or domain is None:
raise Http404()
try:
invoice_pdf = InvoicePdf.get(statement_id)
except ResourceNotFound:
raise Http404()
try:
if invoice_pdf.is_wire:
invoice = WireInvoice.objects.get(
pk=invoice_pdf.invoice_id,
domain=domain
)
elif invoice_pdf.is_customer:
invoice = CustomerInvoice.objects.get(
pk=invoice_pdf.invoice_id
)
else:
invoice = Invoice.objects.get(
pk=invoice_pdf.invoice_id,
subscription__subscriber__domain=domain
)
except (Invoice.DoesNotExist, WireInvoice.DoesNotExist, CustomerInvoice.DoesNotExist):
raise Http404()
if invoice.is_customer_invoice:
from corehq.apps.accounting.utils.account import (
get_account_or_404,
request_has_permissions_for_enterprise_admin,
)
account = get_account_or_404(request.domain)
if not request_has_permissions_for_enterprise_admin(request, account):
return HttpResponseForbidden()
filename = "%(pdf_id)s_%(account)s_%(filename)s" % {
'pdf_id': invoice_pdf._id,
'account': account,
'filename': invoice_pdf.get_filename(invoice)
}
else:
if invoice.is_wire:
edition = 'Bulk'
else:
edition = DESC_BY_EDITION[invoice.subscription.plan_version.plan.edition]['name']
filename = "%(pdf_id)s_%(domain)s_%(edition)s_%(filename)s" % {
'pdf_id': invoice_pdf._id,
'domain': domain,
'edition': edition,
'filename': invoice_pdf.get_filename(invoice),
}
try:
data = invoice_pdf.get_data(invoice)
response = HttpResponse(data, content_type='application/pdf')
            response['Content-Disposition'] = 'inline;filename="%s"' % filename
except Exception as e:
log_accounting_error('Fetching invoice PDF failed: %s' % e)
return HttpResponse(_("Could not obtain billing statement. "
"An issue has been submitted."))
return response
class InternalSubscriptionManagementView(BaseAdminProjectSettingsView):
template_name = 'domain/internal_subscription_management.html'
urlname = 'internal_subscription_mgmt'
page_title = ugettext_lazy("Dimagi Internal Subscription Management")
form_classes = INTERNAL_SUBSCRIPTION_MANAGEMENT_FORMS
@method_decorator(always_allow_project_access)
@method_decorator(require_superuser)
@use_jquery_ui
def dispatch(self, request, *args, **kwargs):
return super(InternalSubscriptionManagementView, self).dispatch(request, *args, **kwargs)
@method_decorator(require_superuser)
def post(self, request, *args, **kwargs):
form = self.get_post_form
if form.is_valid():
try:
form.process_subscription_management()
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
except (NewSubscriptionError, SubscriptionAdjustmentError) as e:
messages.error(self.request, format_html(
'This request will require Ops assistance. '
'Please explain to <a href="mailto:{ops_email}">{ops_email}</a>'
' what you\'re trying to do and report the following error: <strong>"{error}"</strong>',
error=str(e),
ops_email=settings.ACCOUNTS_EMAIL)
)
return self.get(request, *args, **kwargs)
@property
def main_context(self):
context = super(InternalSubscriptionManagementView, self).main_context
context['show_prepaid_modal'] = False
return context
@property
def page_context(self):
return {
'is_form_editable': self.is_form_editable,
'plan_name': Subscription.get_subscribed_plan_by_domain(self.domain),
'select_subscription_type_form': self.select_subscription_type_form,
'subscription_management_forms': list(self.slug_to_form.values()),
'today': datetime.date.today(),
}
@property
def get_post_form(self):
return self.slug_to_form[self.request.POST.get('slug')]
@property
@memoized
def slug_to_form(self):
def create_form(form_class):
if self.request.method == 'POST' and form_class.slug == self.request.POST.get('slug'):
return form_class(self.domain, self.request.couch_user.username, self.request.POST)
return form_class(self.domain, self.request.couch_user.username)
return {form_class.slug: create_form(form_class) for form_class in self.form_classes}
@property
@memoized
def select_subscription_type_form(self):
if self.request.method == 'POST' and 'slug' in self.request.POST:
return SelectSubscriptionTypeForm({
'subscription_type': self.request.POST['slug'],
})
subscription_type = None
subscription = Subscription.get_active_subscription_by_domain(self.domain)
if subscription is None:
subscription_type = None
else:
plan = subscription.plan_version.plan
if subscription.service_type == SubscriptionType.IMPLEMENTATION:
subscription_type = ContractedPartnerForm.slug
elif plan.edition == SoftwarePlanEdition.ENTERPRISE:
subscription_type = DimagiOnlyEnterpriseForm.slug
elif plan.edition == SoftwarePlanEdition.ADVANCED:
subscription_type = AdvancedExtendedTrialForm.slug
return SelectSubscriptionTypeForm(
{'subscription_type': subscription_type},
disable_input=not self.is_form_editable,
)
@property
def is_form_editable(self):
return not self.slug_to_form[ContractedPartnerForm.slug].is_uneditable
PlanOption = namedtuple(
'PlanOption',
['name', 'monthly_price', 'annual_price', 'description']
)
class SelectPlanView(DomainAccountingSettings):
template_name = 'domain/select_plan.html'
urlname = 'domain_select_plan'
page_title = ugettext_lazy("Change Plan")
step_title = ugettext_lazy("Select Plan")
edition = None
lead_text = ugettext_lazy("Please select a plan below that fits your organization's needs.")
@property
@memoized
def can_domain_unpause(self):
return can_domain_unpause(self.domain)
@property
def plan_options(self):
return [
PlanOption(
SoftwarePlanEdition.STANDARD,
"$300",
"$250",
_("For programs with one-time data collection needs, simple "
"case management workflows, and basic M&E requirements."),
),
PlanOption(
SoftwarePlanEdition.PRO,
"$600",
"$500",
_("For programs with complex case management needs, field "
"staff collaborating on tasks, and M&E teams that need to "
"clean and report on data."),
),
PlanOption(
SoftwarePlanEdition.ADVANCED,
"$1200",
"$1000",
_("For programs with distributed field staff, facility-based "
"workflows, and advanced security needs. Also for M&E teams "
"integrating data with 3rd party analytics."),
),
PlanOption(
SoftwarePlanEdition.ENTERPRISE,
_("Contact Us"),
_("Contact Us"),
_("For organizations that need a sustainable path to scale "
"mobile data collection and service delivery across multiple "
"teams, programs, or countries."),
)
]
@property
def start_date_after_minimum_subscription(self):
if self.current_subscription is None:
return ""
elif self.current_subscription.is_trial:
return ""
else:
new_start_date = self.current_subscription.date_start + \
datetime.timedelta(days=MINIMUM_SUBSCRIPTION_LENGTH)
return new_start_date.strftime(USER_DATE_FORMAT)
@property
def next_subscription_edition(self):
if self.current_subscription is None:
return None
elif self.current_subscription.next_subscription is None:
return None
else:
return self.current_subscription.next_subscription.plan_version.plan.edition
@property
def edition_name(self):
if self.edition:
return DESC_BY_EDITION[self.edition]['name']
@property
def parent_pages(self):
return [
{
'title': DomainSubscriptionView.page_title,
'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
}
]
@property
def steps(self):
edition_name = " (%s)" % self.edition_name if self.edition_name else ""
return [
{
'title': _("1. Select a Plan%(edition_name)s") % {
"edition_name": edition_name
},
'url': reverse(SelectPlanView.urlname, args=[self.domain]),
}
]
@property
def main_context(self):
context = super(SelectPlanView, self).main_context
context.update({
'steps': self.steps,
'step_title': self.step_title,
'lead_text': self.lead_text,
})
return context
@property
def page_context(self):
if self.current_subscription is not None and not self.current_subscription.is_trial:
current_price = self.current_subscription.plan_version.product_rate.monthly_fee
default_price = DefaultProductPlan.get_default_plan_version(
edition=self.current_subscription.plan_version.plan.edition
).product_rate.monthly_fee
else:
current_price = 0
default_price = 0
return {
'editions': [
edition.lower()
for edition in [
SoftwarePlanEdition.COMMUNITY,
SoftwarePlanEdition.STANDARD,
SoftwarePlanEdition.PRO,
SoftwarePlanEdition.ADVANCED,
SoftwarePlanEdition.ENTERPRISE,
]
],
'plan_options': [p._asdict() for p in self.plan_options],
'current_edition': (self.current_subscription.plan_version.plan.edition.lower()
if self.current_subscription is not None
and not self.current_subscription.is_trial
else ""),
'current_price': "${0:.0f}".format(current_price),
'is_price_discounted': current_price < default_price,
'start_date_after_minimum_subscription': self.start_date_after_minimum_subscription,
'subscription_below_minimum': (self.current_subscription.is_below_minimum_subscription
if self.current_subscription is not None else False),
'next_subscription_edition': self.next_subscription_edition,
'can_domain_unpause': self.can_domain_unpause,
}
class SelectedEnterprisePlanView(SelectPlanView):
template_name = 'domain/selected_enterprise_plan.html'
urlname = 'enterprise_request_quote'
step_title = ugettext_lazy("Contact Dimagi")
edition = SoftwarePlanEdition.ENTERPRISE
@property
def steps(self):
last_steps = super(SelectedEnterprisePlanView, self).steps
last_steps.append({
'title': _("2. Contact Dimagi"),
'url': reverse(SelectedEnterprisePlanView.urlname, args=[self.domain]),
})
return last_steps
@property
@memoized
def is_not_redirect(self):
return 'plan_edition' not in self.request.POST
@property
@memoized
def enterprise_contact_form(self):
if self.request.method == 'POST' and self.is_not_redirect:
return EnterprisePlanContactForm(self.domain, self.request.couch_user, data=self.request.POST)
return EnterprisePlanContactForm(self.domain, self.request.couch_user)
@property
def page_context(self):
return {
'enterprise_contact_form': self.enterprise_contact_form,
}
def post(self, request, *args, **kwargs):
if self.is_not_redirect and self.enterprise_contact_form.is_valid():
self.enterprise_contact_form.send_message()
messages.success(request, _("Your request was sent to Dimagi. "
"We will follow up shortly."))
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class SelectedAnnualPlanView(SelectPlanView):
template_name = 'domain/selected_annual_plan.html'
urlname = 'annual_plan_request_quote'
step_title = ugettext_lazy("Contact Dimagi")
edition = None
@property
def steps(self):
last_steps = super(SelectedAnnualPlanView, self).steps
last_steps.append({
'title': _("2. Contact Dimagi"),
'url': reverse(SelectedAnnualPlanView.urlname, args=[self.domain]),
})
return last_steps
@property
def on_annual_plan(self):
if self.current_subscription is None:
return False
else:
return self.current_subscription.plan_version.plan.is_annual_plan
@property
@memoized
def is_not_redirect(self):
return 'plan_edition' not in self.request.POST
@property
@memoized
def edition(self):
if self.on_annual_plan:
return self.current_subscription.plan_version.plan.edition
        edition = self.request.GET.get('plan_edition', '').title()
if edition not in [e[0] for e in SoftwarePlanEdition.CHOICES]:
raise Http404()
return edition
@property
@memoized
def annual_plan_contact_form(self):
if self.request.method == 'POST' and self.is_not_redirect:
return AnnualPlanContactForm(self.domain, self.request.couch_user, self.on_annual_plan,
data=self.request.POST)
return AnnualPlanContactForm(self.domain, self.request.couch_user, self.on_annual_plan)
@property
def page_context(self):
return {
'annual_plan_contact_form': self.annual_plan_contact_form,
'on_annual_plan': self.on_annual_plan,
'edition': self.edition,
'selected_enterprise_plan': self.edition == SoftwarePlanEdition.ENTERPRISE
}
def post(self, request, *args, **kwargs):
if self.is_not_redirect and self.annual_plan_contact_form.is_valid():
self.annual_plan_contact_form.send_message()
messages.success(request, _("Your request was sent to Dimagi. "
"We will try our best to follow up in a timely manner."))
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class ConfirmSelectedPlanView(SelectPlanView):
template_name = 'domain/confirm_plan.html'
urlname = 'confirm_selected_plan'
@property
def step_title(self):
if self.is_paused:
return _("Confirm Pause")
return _("Confirm Subscription")
@property
def steps(self):
last_steps = super(ConfirmSelectedPlanView, self).steps
last_steps.append({
'title': _("2. Confirm Pause") if self.is_paused else _("2. Confirm Subscription"),
'url': reverse(SelectPlanView.urlname, args=[self.domain]),
})
return last_steps
@property
def is_paused(self):
return self.edition == SoftwarePlanEdition.PAUSED
@property
@memoized
def edition(self):
        edition = self.request.POST.get('plan_edition', '').title()
if edition not in [e[0] for e in SoftwarePlanEdition.CHOICES]:
raise Http404()
return edition
@property
@memoized
def selected_plan_version(self):
return DefaultProductPlan.get_default_plan_version(self.edition)
@property
def downgrade_messages(self):
subscription = Subscription.get_active_subscription_by_domain(self.domain)
downgrades = get_change_status(
subscription.plan_version if subscription else None,
self.selected_plan_version
)[1]
downgrade_handler = DomainDowngradeStatusHandler(
self.domain_object, self.selected_plan_version, downgrades,
)
return downgrade_handler.get_response()
@property
def is_upgrade(self):
if self.current_subscription.is_trial:
return True
elif self.current_subscription.plan_version.plan.edition == self.edition:
return False
else:
return not is_downgrade(
current_edition=self.current_subscription.plan_version.plan.edition,
next_edition=self.edition
)
@property
def is_same_edition(self):
return self.current_subscription.plan_version.plan.edition == self.edition
@property
def is_downgrade_before_minimum(self):
if self.is_upgrade:
return False
elif self.current_subscription is None or self.current_subscription.is_trial:
return False
elif self.current_subscription.is_below_minimum_subscription:
return True
else:
return False
@property
def current_subscription_end_date(self):
if self.is_downgrade_before_minimum:
return self.current_subscription.date_start + \
datetime.timedelta(days=MINIMUM_SUBSCRIPTION_LENGTH)
else:
return datetime.date.today()
@property
def next_invoice_date(self):
# Next invoice date is the first day of the next month
return datetime.date.today().replace(day=1) + dateutil.relativedelta.relativedelta(months=1)
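    # For illustration (dates made up): if today were 2020-03-15, the
    # expression above would yield 2020-04-01 as the next invoice date.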
@property
def page_context(self):
return {
'downgrade_messages': self.downgrade_messages,
'is_upgrade': self.is_upgrade,
'is_same_edition': self.is_same_edition,
'next_invoice_date': self.next_invoice_date.strftime(USER_DATE_FORMAT),
'current_plan': (self.current_subscription.plan_version.plan.edition
if self.current_subscription is not None else None),
'is_downgrade_before_minimum': self.is_downgrade_before_minimum,
'current_subscription_end_date': self.current_subscription_end_date.strftime(USER_DATE_FORMAT),
'start_date_after_minimum_subscription': self.start_date_after_minimum_subscription,
'new_plan_edition': self.edition,
'is_paused': self.is_paused,
'tile_css': 'tile-{}'.format(self.edition.lower()),
}
@property
def main_context(self):
context = super(ConfirmSelectedPlanView, self).main_context
context.update({
'plan': (self.current_subscription.plan_version.user_facing_description if self.is_same_edition
else self.selected_plan_version.user_facing_description),
})
return context
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse(SelectPlanView.urlname, args=[self.domain]))
def post(self, request, *args, **kwargs):
if not self.can_domain_unpause:
return HttpResponseRedirect(reverse(SelectPlanView.urlname, args=[self.domain]))
if self.edition == SoftwarePlanEdition.ENTERPRISE:
return HttpResponseRedirect(reverse(SelectedEnterprisePlanView.urlname, args=[self.domain]))
return super(ConfirmSelectedPlanView, self).get(request, *args, **kwargs)
class ConfirmBillingAccountInfoView(ConfirmSelectedPlanView, AsyncHandlerMixin):
template_name = 'domain/confirm_billing_info.html'
urlname = 'confirm_billing_account_info'
step_title = ugettext_lazy("Confirm Billing Information")
is_new = False
async_handlers = [
Select2BillingInfoHandler,
]
@property
def steps(self):
last_steps = super(ConfirmBillingAccountInfoView, self).steps
last_steps.append({
'title': _("3. Confirm Billing Account"),
'url': reverse(ConfirmBillingAccountInfoView.urlname, args=[self.domain]),
})
return last_steps
@property
@memoized
def account(self):
if self.current_subscription:
return self.current_subscription.account
account, self.is_new = BillingAccount.get_or_create_account_by_domain(
self.domain,
created_by=self.request.couch_user.username,
account_type=BillingAccountType.USER_CREATED,
entry_point=EntryPoint.SELF_STARTED,
)
return account
@property
def payment_method(self):
user = self.request.user.username
payment_method, __ = StripePaymentMethod.objects.get_or_create(
web_user=user,
method_type=PaymentMethodType.STRIPE,
)
return payment_method
@property
@memoized
def is_form_post(self):
return 'company_name' in self.request.POST
@property
def downgrade_email_note(self):
if self.is_upgrade:
return None
if self.is_same_edition:
return None
return _get_downgrade_or_pause_note(self.request)
@property
@memoized
def billing_account_info_form(self):
if self.request.method == 'POST' and self.is_form_post:
return ConfirmNewSubscriptionForm(
self.account, self.domain, self.request.couch_user.username,
self.selected_plan_version, self.current_subscription, data=self.request.POST
)
return ConfirmNewSubscriptionForm(self.account, self.domain, self.request.couch_user.username,
self.selected_plan_version, self.current_subscription)
@property
def page_context(self):
return {
'billing_account_info_form': self.billing_account_info_form,
'stripe_public_key': settings.STRIPE_PUBLIC_KEY,
'cards': self.payment_method.all_cards_serialized(self.account),
'downgrade_email_note': self.downgrade_email_note
}
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
if not self.can_domain_unpause:
return HttpResponseRedirect(reverse(SelectPlanView.urlname, args=[self.domain]))
if self.is_form_post and self.billing_account_info_form.is_valid():
if not self.current_subscription.user_can_change_subscription(self.request.user):
messages.error(
request, _(
"You do not have permission to change the subscription for this customer-level account. "
"Please reach out to the %s enterprise admin for help."
) % self.account.name
)
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
is_saved = self.billing_account_info_form.save()
software_plan_name = DESC_BY_EDITION[self.selected_plan_version.plan.edition]['name']
next_subscription = self.current_subscription.next_subscription
if is_saved:
if not request.user.is_superuser:
if self.billing_account_info_form.is_same_edition():
self.send_keep_subscription_email()
elif self.billing_account_info_form.is_downgrade_from_paid_plan():
self.send_downgrade_email()
if self.billing_account_info_form.is_same_edition():
# Choosing to keep the same subscription
message = _(
"Thank you for choosing to stay with your %(software_plan_name)s "
"Edition Plan subscription."
) % {
'software_plan_name': software_plan_name,
}
elif next_subscription is not None:
# New subscription has been scheduled for the future
current_subscription_edition = self.current_subscription.plan_version.plan.edition
start_date = next_subscription.date_start.strftime(USER_DATE_FORMAT)
message = _(
"You have successfully scheduled your current %(current_subscription_edition)s "
"Edition Plan subscription to downgrade to the %(software_plan_name)s Edition Plan "
"on %(start_date)s."
) % {
'current_subscription_edition': current_subscription_edition,
'software_plan_name': software_plan_name,
'start_date': start_date,
}
else:
message = _(
"Your project has been successfully subscribed to the %s Edition Plan."
) % software_plan_name
messages.success(
request, message
)
return HttpResponseRedirect(reverse(DomainSubscriptionView.urlname, args=[self.domain]))
downgrade_date = next_subscription.date_start.strftime(USER_DATE_FORMAT)
messages.error(
request, _(
"You have already scheduled a downgrade to the %(software_plan_name)s Software Plan on "
"%(downgrade_date)s. If this is a mistake, please reach out to %(contact_email)."
) % {
'software_plan_name': software_plan_name,
'downgrade_date': downgrade_date,
'contact_email': settings.INVOICING_CONTACT_EMAIL,
}
)
return super(ConfirmBillingAccountInfoView, self).post(request, *args, **kwargs)
def send_downgrade_email(self):
message = '\n'.join([
'{user} is downgrading the subscription for {domain} from {old_plan} to {new_plan}',
'',
'{note}',
]).format(
user=self.request.couch_user.username,
domain=self.request.domain,
old_plan=self.request.POST.get('old_plan', 'unknown'),
new_plan=self.request.POST.get('new_plan', 'unknown'),
note=self.request.POST.get('downgrade_email_note', 'none')
)
send_mail_async.delay(
'{}Subscription downgrade for {}'.format(
'[staging] ' if settings.SERVER_ENVIRONMENT == "staging" else "",
self.request.domain
), message, settings.DEFAULT_FROM_EMAIL, [settings.GROWTH_EMAIL]
)
def send_keep_subscription_email(self):
message = '\n'.join([
'{user} decided to keep their subscription for {domain} of {new_plan}',
]).format(
user=self.request.couch_user.username,
domain=self.request.domain,
            new_plan=self.request.POST.get('new_plan', 'unknown'),
)
send_mail_async.delay(
'{}Subscription kept for {}'.format(
'[staging] ' if settings.SERVER_ENVIRONMENT == "staging" else "",
self.request.domain
), message, settings.DEFAULT_FROM_EMAIL, [settings.GROWTH_EMAIL]
)
class SubscriptionMixin(object):
@property
@memoized
def subscription(self):
subscription = Subscription.get_active_subscription_by_domain(self.domain)
if subscription is None:
raise Http404
if subscription.is_renewed:
raise Http404
if subscription.plan_version.is_paused:
raise Http404
return subscription
class SubscriptionRenewalView(SelectPlanView, SubscriptionMixin):
urlname = "domain_subscription_renewal"
page_title = ugettext_lazy("Renew Plan")
step_title = ugettext_lazy("Renew Plan")
template_name = "domain/renew_plan.html"
@property
def lead_text(self):
return ugettext_lazy("Based on your current usage we recommend you use the <strong>{plan}</strong> plan"
.format(plan=self.current_subscription.plan_version.plan.edition))
@property
def page_context(self):
context = super(SubscriptionRenewalView, self).page_context
current_edition = self.subscription.plan_version.plan.edition
if current_edition in [
SoftwarePlanEdition.COMMUNITY,
SoftwarePlanEdition.PAUSED,
]:
raise Http404()
context.update({
'current_edition': current_edition,
'plan': self.subscription.plan_version.user_facing_description,
'tile_css': 'tile-{}'.format(current_edition.lower()),
'is_renewal_page': True,
})
return context
class ConfirmSubscriptionRenewalView(SelectPlanView, DomainAccountingSettings, AsyncHandlerMixin, SubscriptionMixin):
template_name = 'domain/confirm_subscription_renewal.html'
urlname = 'domain_subscription_renewal_confirmation'
page_title = ugettext_lazy("Confirm Billing Information")
step_title = ugettext_lazy("Confirm Billing Information")
async_handlers = [
Select2BillingInfoHandler,
]
@property
def is_request_from_current_step(self):
return self.request.method == 'POST' and "from_plan_page" not in self.request.POST
@method_decorator(require_POST)
def dispatch(self, request, *args, **kwargs):
return super(ConfirmSubscriptionRenewalView, self).dispatch(request, *args, **kwargs)
@property
@memoized
def next_plan_version(self):
plan_version = DefaultProductPlan.get_default_plan_version(self.new_edition)
if plan_version is None:
try:
# needed for sending to sentry
raise SubscriptionRenewalError()
except SubscriptionRenewalError:
log_accounting_error(
f"Could not find a matching renewable plan "
f"for {self.domain}, "
f"subscription number {self.subscription.pk}."
)
raise Http404
return plan_version
@property
@memoized
def confirm_form(self):
if self.is_request_from_current_step:
return ConfirmSubscriptionRenewalForm(
self.account, self.domain, self.request.couch_user.username,
self.subscription, self.next_plan_version,
data=self.request.POST,
)
return ConfirmSubscriptionRenewalForm(
self.account, self.domain, self.request.couch_user.username,
self.subscription, self.next_plan_version,
)
@property
def page_context(self):
return {
'subscription': self.subscription,
'plan': self.subscription.plan_version.user_facing_description,
'confirm_form': self.confirm_form,
'next_plan': self.next_plan_version.user_facing_description,
'is_renewal_page': True,
}
@property
def new_edition(self):
        return self.request.POST.get('plan_edition', '').title()
def post(self, request, *args, **kwargs):
if self.async_response is not None:
return self.async_response
if self.new_edition == SoftwarePlanEdition.ENTERPRISE:
return HttpResponseRedirect(reverse(SelectedEnterprisePlanView.urlname, args=[self.domain]))
if (not self.is_request_from_current_step
and self.new_edition not in SoftwarePlanEdition.SELF_RENEWABLE_EDITIONS):
messages.error(
request,
_("Your subscription is not eligible for self-renewal. "
"Please sign up for a new subscription instead or contact {}"
).format(settings.BILLING_EMAIL)
)
return HttpResponseRedirect(
reverse(DomainSubscriptionView.urlname, args=[self.domain])
)
if self.confirm_form.is_valid():
is_saved = self.confirm_form.save()
if not is_saved:
messages.error(
request, _(
"There was an issue renewing your subscription. We "
"have been notified of the issue. Please try "
"submitting again, and if the problem persists, "
"please try in a few hours."
)
)
else:
messages.success(
request, _("Your subscription was successfully renewed!")
)
return HttpResponseRedirect(
reverse(DomainSubscriptionView.urlname, args=[self.domain])
)
return self.get(request, *args, **kwargs)
class EmailOnDowngradeView(View):
urlname = "email_on_downgrade"
def post(self, request, *args, **kwargs):
message = '\n'.join([
'{user} is downgrading the subscription for {domain} from {old_plan} to {new_plan}.',
'',
'Note from user: {note}',
]).format(
user=request.couch_user.username,
domain=request.domain,
old_plan=request.POST.get('old_plan', 'unknown'),
new_plan=request.POST.get('new_plan', 'unknown'),
note=request.POST.get('note', 'none'),
)
send_mail_async.delay(
'{}Subscription downgrade for {}'.format(
'[staging] ' if settings.SERVER_ENVIRONMENT == "staging" else "",
request.domain
), message, settings.DEFAULT_FROM_EMAIL, [settings.GROWTH_EMAIL]
)
return json_response({'success': True})
class BaseCardView(DomainAccountingSettings):
@property
def payment_method(self):
payment_method, __ = StripePaymentMethod.objects.get_or_create(
web_user=self.request.user.username,
method_type=PaymentMethodType.STRIPE,
)
return payment_method
def _generic_error(self):
error = ("Something went wrong while processing your request. "
"We're working quickly to resolve the issue. "
"Please try again in a few hours.")
return json_response({'error': error}, status_code=500)
def _stripe_error(self, e):
body = e.json_body
err = body['error']
return json_response({'error': err['message'],
'cards': self.payment_method.all_cards_serialized(self.account)},
status_code=502)
class CardView(BaseCardView):
"""View for dealing with a single Credit Card"""
url_name = "card_view"
def post(self, request, domain, card_token):
try:
card = self.payment_method.get_card(card_token)
if request.POST.get("is_autopay") == 'true':
self.payment_method.set_autopay(card, self.account, domain)
elif request.POST.get("is_autopay") == 'false':
self.payment_method.unset_autopay(card, self.account)
except self.payment_method.STRIPE_GENERIC_ERROR as e:
return self._stripe_error(e)
except Exception as e:
return self._generic_error()
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
def delete(self, request, domain, card_token):
try:
self.payment_method.remove_card(card_token)
except self.payment_method.STRIPE_GENERIC_ERROR as e:
return self._stripe_error(e)
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
class CardsView(BaseCardView):
"""View for dealing Credit Cards"""
url_name = "cards_view"
def get(self, request, domain):
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
def post(self, request, domain):
stripe_token = request.POST.get('token')
autopay = request.POST.get('autopay') == 'true'
try:
self.payment_method.create_card(stripe_token, self.account, domain, autopay)
except self.payment_method.STRIPE_GENERIC_ERROR as e:
return self._stripe_error(e)
except Exception as e:
return self._generic_error()
return json_response({'cards': self.payment_method.all_cards_serialized(self.account)})
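# Rough sketch of the request contract the two card views above expect, based
# only on the POST parameters they read (URLs shown are placeholders; the real
# routes live in urls.py):
#
#   POST <cards_view url>              token=<stripe card token>  autopay=true
#   POST <card_view url>/<card_token>  is_autopay=false
#
# Both return JSON of the form {'cards': [...serialized cards...]} on success.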
def _get_downgrade_or_pause_note(request, is_pause=False):
downgrade_reason = request.POST.get('downgrade_reason')
will_project_restart = request.POST.get('will_project_restart')
new_tool = request.POST.get('new_tool')
new_tool_reason = request.POST.get('new_tool_reason')
feedback = request.POST.get('feedback')
if not downgrade_reason:
return None
return "\n".join([
"Why are you {method} your subscription today?\n{reason}\n",
"Do you think your project may start again?\n{will_project_restart}\n",
"Could you indicate which new tool you are using?\n{new_tool}\n",
"Why are you switching to a new tool?\n{new_tool_reason}\n",
"Additional feedback:\n{feedback}\n\n"
]).format(
method="pausing" if is_pause else "downgrading",
reason=downgrade_reason,
will_project_restart=will_project_restart,
new_tool=new_tool,
new_tool_reason=new_tool_reason,
feedback=feedback,
)
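# For illustration, with made-up POST values such as downgrade_reason="budget"
# and will_project_restart="maybe", the note built above would read roughly:
#
#   Why are you downgrading your subscription today?
#   budget
#
#   Do you think your project may start again?
#   maybe
#   ...
#
# Only the question wording comes from this module; the answers are examples.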
@require_POST
@login_and_domain_required
@require_permission(Permissions.edit_billing)
def pause_subscription(request, domain):
current_subscription = Subscription.get_active_subscription_by_domain(domain)
if not current_subscription.user_can_change_subscription(request.user):
messages.error(
request, _(
"You do not have permission to pause the subscription for this customer-level account. "
"Please reach out to the %s enterprise admin for help."
) % current_subscription.account.name
)
return HttpResponseRedirect(
reverse(DomainSubscriptionView.urlname, args=[domain])
)
try:
with transaction.atomic():
paused_subscription = pause_current_subscription(
domain, request.couch_user.username, current_subscription
)
pause_message = '\n'.join([
"{user} is pausing the subscription for {domain} from {old_plan}\n",
"{note}"
]).format(
user=request.couch_user.username,
domain=domain,
old_plan=current_subscription.plan_version.plan.edition,
note=_get_downgrade_or_pause_note(request, True),
)
send_mail_async.delay(
"{}Subscription pausing for {}".format(
'[staging] ' if settings.SERVER_ENVIRONMENT == "staging" else "",
domain,
), pause_message, settings.DEFAULT_FROM_EMAIL,
[settings.GROWTH_EMAIL]
)
if current_subscription.is_below_minimum_subscription:
messages.success(request, _(
"Your project's subscription will be paused on {}. "
"We hope to see you again!"
).format(paused_subscription.date_start.strftime(USER_DATE_FORMAT)))
else:
messages.success(
request, _("Your project's subscription has now been paused. "
"We hope to see you again!")
)
except Exception as e:
log_accounting_error(
"There was an error pausing the subscription for the domain '{}'. "
"Message: {}".format(domain, str(e)),
show_stack_trace=True
)
messages.error(
request, _("We were not able to pause your subscription at this time. "
"Please contact {} if you continue to receive this error. "
"We apologize for the inconvenience.").format(
settings.BILLING_EMAIL,
)
)
return HttpResponseRedirect(
reverse(DomainSubscriptionView.urlname, args=[domain])
)
| 38.982684
| 117
| 0.625208
|
d039f03a2c8a283a14014218f2596fcb2d60b02d
| 16,997
|
py
|
Python
|
pylib/gyp/generator/eclipse.py
|
chlorm-forks/gyp
|
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
|
[
"BSD-3-Clause"
] | 77
|
2018-07-01T15:55:34.000Z
|
2022-03-30T09:16:54.000Z
|
pylib/gyp/generator/eclipse.py
|
chlorm-forks/gyp
|
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
|
[
"BSD-3-Clause"
] | 116
|
2021-05-29T16:32:51.000Z
|
2021-08-13T16:05:29.000Z
|
pylib/gyp/generator/eclipse.py
|
chlorm-forks/gyp
|
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
|
[
"BSD-3-Clause"
] | 53
|
2018-04-13T12:06:06.000Z
|
2022-03-25T13:54:38.000Z
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if 'include_dirs' in config:
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path):
"""Calculate the defines for a project.
Returns:
    A dict that includes explicit defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
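# To make the -E -dM parsing above concrete: a compiler output line such as
#   #define __linux__ 1
# splits into ['#define', '__linux__', '1'] and is stored as
# all_defines['__linux__'] = '1', while a value-less line like '#define FOO'
# falls back to the default value '1'. (Macro names here are examples only.)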
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.keys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
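# The writers above produce XML fragments shaped roughly like the following
# (DEBUG=1 is an example define; the real output lists every language and
# every entry):
#
#   <section name="org.eclipse.cdt.internal.ui.wizards.settingswizards.Macros">
#     <language name="GNU C++">
#       <macro><name>DEBUG</name><value>1</value></macro>
#     </language>
#   </section>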
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
GenerateCdtSettingsFile(target_list,
target_dicts,
data,
params,
config_name,
os.path.join(toplevel_build,
'eclipse-cdt-settings.xml'),
options,
shared_intermediate_dirs)
GenerateClasspathFile(target_list,
target_dicts,
options.toplevel_dir,
toplevel_build,
os.path.join(toplevel_build,
'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
config_name, out_name, options,
shared_intermediate_dirs):
gyp.common.EnsureDirExists(out_name)
with open(out_name, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
compiler_path = GetCompilerPath(target_list, data, options)
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs,
config_name, params, compiler_path)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name,
params, compiler_path)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
toplevel_build, out_name):
'''Generates a classpath file suitable for symbol navigation and code
completion of Java code (such as in Android projects) by finding all
.java and .jar files used as action inputs.'''
gyp.common.EnsureDirExists(out_name)
result = ET.Element('classpath')
def AddElements(kind, paths):
# First, we need to normalize the paths so they are all relative to the
# toplevel dir.
rel_paths = set()
for path in paths:
if os.path.isabs(path):
rel_paths.add(os.path.relpath(path, toplevel_dir))
else:
rel_paths.add(path)
for path in sorted(rel_paths):
entry_element = ET.SubElement(result, 'classpathentry')
entry_element.set('kind', kind)
entry_element.set('path', path)
AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
# Include the standard JRE container and a dummy out folder
AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
# Include a dummy out folder so that Eclipse doesn't use the default /bin
# folder in the root of the project.
AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all .jars used as inputs.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
if os.path.isabs(input_):
yield input_
else:
yield os.path.join(os.path.dirname(target_name), input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all likely java package root directories.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if (os.path.splitext(input_)[1] == '.java' and
not input_.startswith('$')):
dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
input_))
# If there is a parent 'src' or 'java' folder, navigate up to it -
# these are canonical package root names in Chromium. This will
# break if 'src' or 'java' exists in the package structure. This
# could be further improved by inspecting the java file for the
# package name if this proves to be too fragile in practice.
parent_search = dir_
while os.path.basename(parent_search) not in ['src', 'java']:
parent_search, _ = os.path.split(parent_search)
if not parent_search or parent_search == toplevel_dir:
# Didn't find a known root, just return the original path
yield dir_
break
else:
yield parent_search
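# To make the package-root heuristic above concrete: for an (illustrative)
# action input like foo/android/java/src/org/chromium/Bar.java, the walk stops
# at the first parent directory named 'src' or 'java', so foo/android/java/src
# would be yielded; if no such parent exists below the top level, the file's
# own directory is yielded unchanged.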
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError("--generator_output not implemented for eclipse")
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations']
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| 39.899061
| 80
| 0.666765
|
e847907169f8111a8b3b2c0175a250aabfebcb88
| 2,295
|
py
|
Python
|
tickit/utils/configuration/configurable.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 4
|
2021-09-16T13:35:33.000Z
|
2022-02-01T23:35:53.000Z
|
tickit/utils/configuration/configurable.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 46
|
2021-09-16T13:44:58.000Z
|
2022-02-02T13:42:56.000Z
|
tickit/utils/configuration/configurable.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, DefaultDict, Dict, Iterator, Type, TypeVar
from apischema import deserializer
from apischema.conversions import Conversion
from apischema.tagged_unions import Tagged, TaggedUnion, get_tagged
# Implementation adapted from apischema example: Class as tagged union of its subclasses
# see: https://wyfo.github.io/apischema/examples/subclass_tagged_union/
#: A class
Cls = TypeVar("Cls", bound=type)
def rec_subclasses(cls: type) -> Iterator[type]:
"""Recursive implementation of type.__subclasses__.
Args:
cls (Type): The base class.
Returns:
Iterator[type]: An iterator of subclasses.
"""
for sub_cls in cls.__subclasses__():
yield sub_cls
yield from rec_subclasses(sub_cls)
#: Whether the current class is registered as a tagged union
is_tagged_union: Dict[Type[Any], bool] = DefaultDict(lambda: False)
def as_tagged_union(cls: Cls) -> Cls:
"""A decorator to make a config base class which can deserialize aliased sub-classes.
A decorator which makes a config class the root of a tagged union of sub-classes
allowing for serialization and deserialization of config trees by class alias. The
function registers both an apischema serialization and an apischema deserialization
conversion for the base class which perform lookup based on a tagged union of
aliased sub-classes.
Args:
cls (Cls): The config base class.
Returns:
Cls: The modified config base class.
"""
# This will only be used if we want to generate a json schema (which we will)
def deserialization() -> Conversion:
annotations: Dict[str, Any] = {}
deserialization_namespace: Dict[str, Any] = {"__annotations__": annotations}
for sub in rec_subclasses(cls):
fullname = sub.__module__ + "." + sub.__name__
annotations[fullname] = Tagged[sub] # type: ignore
deserialization_union = type(
cls.__name__ + "TaggedUnion",
(TaggedUnion,),
deserialization_namespace,
)
return Conversion(
lambda obj: get_tagged(obj)[1], source=deserialization_union, target=cls
)
deserializer(lazy=deserialization, target=cls)
is_tagged_union[cls] = True
return cls
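# A minimal usage sketch (class names and payload are hypothetical, not part
# of tickit): decorating a config base class registers a deserializer that
# dispatches on the fully qualified sub-class name, so a mapping keyed by
# "<module>.<ClassName>" deserializes into that sub-class.
#
#   @as_tagged_union
#   @dataclass
#   class ComponentConfig:
#       name: str
#
#   @dataclass
#   class SineWaveConfig(ComponentConfig):
#       amplitude: float = 1.0
#
#   apischema.deserialize(
#       ComponentConfig,
#       {"my_module.SineWaveConfig": {"name": "sine", "amplitude": 2.0}},
#   )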
| 34.772727
| 89
| 0.696296
|
708af50d7bc07d517419931e78935e86e6683016
| 1,304
|
py
|
Python
|
test_analysis.py
|
jgm48/ia-flood-risk-project
|
b0e61bae8ff4c00bf767e95f7e271760c0bff543
|
[
"MIT"
] | null | null | null |
test_analysis.py
|
jgm48/ia-flood-risk-project
|
b0e61bae8ff4c00bf767e95f7e271760c0bff543
|
[
"MIT"
] | null | null | null |
test_analysis.py
|
jgm48/ia-flood-risk-project
|
b0e61bae8ff4c00bf767e95f7e271760c0bff543
|
[
"MIT"
] | null | null | null |
#test_analysis
"""Unit test for analysis module"""
import datetime
import numpy
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.analysis import polyfit, risk_level_towns
from floodsystem.datafetcher import fetch_measure_levels
def test_polyfit():
"""Tests polyfit function"""
# station data as before
stations = build_station_list()
update_water_levels(stations)
dt = 10
assert type(stations) == list
# output data types
for x in range(1, 1500, 100):
i = stations[x]
dates, levels = fetch_measure_levels(i.measure_id, dt=datetime.timedelta(days=dt))
        for n in range(2, 4):
            assert type(polyfit(dates, levels, n)[0]) == numpy.poly1d
            assert type(polyfit(dates, levels, n)[1]) == numpy.float64
def test_risk_level_towns():
"""Tests risk_level_towns function"""
stations = build_station_list()
update_water_levels(stations)
severe, high, moderate, low = risk_level_towns()
# check that most towns belong to a risk level
for i in range(0, len(stations) - 1, 250):
assert stations[i].town in severe or stations[i].town in high or stations[i].town in moderate or stations[i].town in low
| 31.047619
| 128
| 0.677147
|