Dataset schema (column, dtype, observed range; "nullable" marks columns that may be null):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
1dd38ea121e2d430d7fb7c87f94d9f09bfe54d8a
| 4,091
|
py
|
Python
|
sources/Lexer.py
|
juliennix/NXLT
|
73c1453027230f3073a60758421541086eba1d58
|
[
"MIT"
] | null | null | null |
sources/Lexer.py
|
juliennix/NXLT
|
73c1453027230f3073a60758421541086eba1d58
|
[
"MIT"
] | null | null | null |
sources/Lexer.py
|
juliennix/NXLT
|
73c1453027230f3073a60758421541086eba1d58
|
[
"MIT"
] | null | null | null |
###############################################################################
# Lexer.py #
# #
# Lexer.py is used to create Tokens from an input file                       #
# simply run: python Lexer.py name_of_inputfile                              #
###############################################################################
import sys
import ply.lex as lex
# List of the tokens names
tokens = [
# Identifier
'IDENTIFIER', 'NUMBER', 'FLOAT',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'UMINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
    # Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'COLON',
'NEWLINE', 'HASHTAG',
# Generic values
'TRUE', 'FALSE',
]
# Reserved words
reserved = {
'if' : 'IF',
'else' : 'ELSE',
'while' : 'WHILE',
'continue' : 'CONTINUE',
'break' : 'BREAK',
'out' : 'OUT',
'void' : 'VOID',
'print' : 'PRINT',
'new' : 'NEW',
}
tokens += reserved.values()
# Classic operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
# Logic operators
t_OR = r'or'
t_AND = r'and'
t_NOT = r'not'
t_XOR = r'xor'
# Binary operators
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
# Comparator operators
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_COLON = r':'
t_HASHTAG = r'\#'
# Generic values
t_TRUE = r'true'
t_FALSE = r'false'
# Identifier definition (names), excluding the reserved words above
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_]*[\']*'
if t.value.upper() in tokens:
t.type = t.value.upper()
return t
def t_FLOAT(t):
r'-?\d+\.\d*(e-?\d+)?'
t.value = float(t.value)
return t
# Numbers definition
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIMPLE_COMMENTS(t):
r'//.*'
def t_BLOCK_COMMENTS(t):
r'/\*(\n|.)*?(\*/)'
t.value=t.value.count('\n')*'\n'
t_NEWLINE(t)
# Count the line
def t_NEWLINE(t):
r'\n'
t.lexer.lineno += len(t.value)
return t
# Compute column.
# input is the input text string
# token is a token instance
# used in error handling
def find_column(input,token):
last_cr = input.rfind('\n',0,token.lexpos)
if last_cr < 0:
last_cr = 0
column = (token.lexpos - last_cr) + 1
return column
# Error management
def t_error(t):
print "Illegal character: " + str(t.value[0]) + " at the line : " + str(t.lexer.lineno)
t.lexer.skip(1)
# Completely ignored characters
t_ignore = ' \t'
lexer = lex.lex()
##############################################DEBUG##############################################
if __name__ == '__main__':
lex.runmain()
| 22.983146
| 97
| 0.431924
|
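A minimal driver sketch for the PLY lexer above, assuming sources/Lexer.py is importable as a module named Lexer; the import name and the sample source string are illustrative only:

import Lexer  # hypothetical module name for sources/Lexer.py; builds `lexer` at import time

source = "if (x >= 10) { out = x + 1 } else { out = 0 }"
Lexer.lexer.input(source)
for tok in Lexer.lexer:   # PLY lexers are iterable; each item is a LexToken
    print(tok)            # e.g. LexToken(IF,'if',1,0), LexToken(LPAREN,'(',1,3), ...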
24e39f0ba6855251f5030f81fed79e20fd8db6bc
| 1,834
|
py
|
Python
|
app/tests/test_users.py
|
victor-iyi/heart-disease
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
[
"MIT"
] | 1
|
2021-06-20T09:08:26.000Z
|
2021-06-20T09:08:26.000Z
|
app/tests/test_users.py
|
victor-iyi/heart-disease
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
[
"MIT"
] | null | null | null |
app/tests/test_users.py
|
victor-iyi/heart-disease
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
[
"MIT"
] | null | null | null |
# Copyright 2021 Victor I. Afolabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import srsly
from httpx import AsyncClient
from app.api import app
@pytest.mark.asyncio
async def test_read_user() -> None:
"""Test loading a single user."""
    async with AsyncClient(app=app, base_url='http://test') as client:
user_id = 1
response = await client.get(f'/users/{user_id}')
assert response.status_code == 200
@pytest.mark.asyncio
async def test_read_patient() -> None:
"""Test loading a single patient."""
    async with AsyncClient(app=app, base_url='http://test') as client:
patient_id = 2
response = await client.get(f'/users/{patient_id}')
assert response.status_code == 200
@pytest.mark.asyncio
async def test_register_user() -> None:
"""Test registering a user."""
user = srsly.read_json('app/sample/users_user_info.json')
    async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.post('/users', data=user)
assert response.status_code == 200
@pytest.mark.asyncio
async def test_add_patient_info() -> None:
"""Test registering a patient."""
patient = srsly.read_json('app/sample/users_patient_info.json')
    async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.post('/users/patient', data=patient)
assert response.status_code == 200
| 29.111111
| 74
| 0.71265
|
d98755068759a97f14136c4e166fd0bd9333e6c3
| 49
|
py
|
Python
|
pySPACE/tests/utils/__init__.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 32
|
2015-02-20T09:03:09.000Z
|
2022-02-25T22:32:52.000Z
|
pySPACE/tests/utils/__init__.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 5
|
2015-05-18T15:08:40.000Z
|
2020-03-05T19:18:01.000Z
|
pySPACE/tests/utils/__init__.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 18
|
2015-09-28T07:16:38.000Z
|
2021-01-20T13:52:19.000Z
|
""" Utility functions and classes for testing """
| 49
| 49
| 0.734694
|
b92371291ab95168de1f27f250267ef08e6f2795
| 925
|
py
|
Python
|
vispy/__init__.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/__init__.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/__init__.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
=====
Vispy
=====
Vispy is a **high-performance interactive 2D/3D data visualization
library**. Vispy leverages the computational power of modern **Graphics
Processing Units (GPUs)** through the **OpenGL** library to display very
large datasets.
For more information, see http://vispy.org.
"""
from __future__ import division
__all__ = ['use', 'sys_info', 'set_log_level', 'test']
# Definition of the version number
version_info = 0, 4, 0, 'dev' # major, minor, patch, extra
# Nice string for the version (mimic how IPython composes its version str)
__version__ = '-'.join(map(str, version_info)).replace('-', '.', 2).strip('-')
from .util import config, set_log_level, keys, sys_info # noqa
from .util.wrappers import use # noqa
from .testing import test # noqa
| 28.030303
| 78
| 0.709189
|
792ebb5136484ab2048d6954d0bd9be708b05059
| 1,967
|
py
|
Python
|
catkin_tools/notifications/impl.py
|
xqms/catkin_tools
|
224cfccb8a9f462f2cd17413b87a59f0151ceb2b
|
[
"Apache-2.0"
] | null | null | null |
catkin_tools/notifications/impl.py
|
xqms/catkin_tools
|
224cfccb8a9f462f2cd17413b87a59f0151ceb2b
|
[
"Apache-2.0"
] | null | null | null |
catkin_tools/notifications/impl.py
|
xqms/catkin_tools
|
224cfccb8a9f462f2cd17413b87a59f0151ceb2b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This modules provides a portable, failsafe notification function"""
import os
import platform
import subprocess
from catkin_tools.utils import which
this_dir = os.path.dirname(__file__)
def _notify_osx(title, msg):
app_path = os.path.join(this_dir, 'resources', 'osx', 'catkin build.app')
open_exec = which('open')
if open_exec is None:
return
command = [open_exec, app_path, '--args', title, msg]
    terminal = os.environ.get('TERM_PROGRAM')  # may be unset outside Terminal/iTerm
if terminal == "Apple_Terminal":
command += ["-activate", "com.apple.Terminal"]
elif terminal == "iTerm.app":
command += ["-activate", "com.googlecode.iterm2"]
subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _notify_linux(title, msg):
icon_path = os.path.join(this_dir, 'resources', 'linux', 'catkin_icon.png')
notify_send_exec = which('notify-send')
if notify_send_exec is None:
return
subprocess.Popen([notify_send_exec, '-i', icon_path, '-t', '2000', '--hint', 'int:transient:1', title, msg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def notify(title, msg):
if platform.system() == 'Darwin':
return _notify_osx(title, msg)
if platform.system() == 'Linux':
return _notify_linux(title, msg)
| 34.508772
| 112
| 0.675648
|
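A one-call usage sketch for the notify() helper above; the title and message strings are made up, and on platforms other than Darwin or Linux the call simply does nothing:

from catkin_tools.notifications.impl import notify  # module path as listed in the row above

notify("catkin build", "Finished my_workspace")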
393eafd7692da08424dad18ea8fcc5ddc354d48c
| 2,046
|
py
|
Python
|
Lib/lib-stdwin/rect.py
|
1byte2bytes/cpython
|
7fbaeb819ca7b20dca048217ff585ec195e999ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | 1
|
2019-10-25T21:41:07.000Z
|
2019-10-25T21:41:07.000Z
|
Lib/lib-stdwin/rect.py
|
1byte2bytes/cpython
|
7fbaeb819ca7b20dca048217ff585ec195e999ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
Lib/lib-stdwin/rect.py
|
1byte2bytes/cpython
|
7fbaeb819ca7b20dca048217ff585ec195e999ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
# Module 'rect'.
#
# Operations on rectangles.
# There is some normalization: all results return the object 'empty'
# if their result would contain no points.
# Exception.
#
error = 'rect.error'
# The empty rectangle.
#
empty = (0, 0), (0, 0)
# Check if a rectangle is empty.
#
def is_empty(r):
(left, top), (right, bottom) = r
return left >= right or top >= bottom
# Compute the intersection of two or more rectangles.
# This works with a list or tuple argument.
#
def intersect(list):
if not list: raise error, 'intersect called with empty list'
if is_empty(list[0]): return empty
(left, top), (right, bottom) = list[0]
for rect in list[1:]:
if is_empty(rect):
return empty
(l, t), (r, b) = rect
if left < l: left = l
if top < t: top = t
if right > r: right = r
if bottom > b: bottom = b
if is_empty(((left, top), (right, bottom))):
return empty
return (left, top), (right, bottom)
# Compute the smallest rectangle containing all given rectangles.
# This works with a list or tuple argument.
#
def union(list):
(left, top), (right, bottom) = list[0]
for (l, t), (r, b) in list[1:]:
if not is_empty(((l, t), (r, b))):
if l < left: left = l
if t < top: top = t
if r > right: right = r
if b > bottom: bottom = b
res = (left, top), (right, bottom)
if is_empty(res):
return empty
return res
# Check if a point is in a rectangle.
#
def pointinrect((h, v), ((left, top), (right, bottom))):
return left <= h < right and top <= v < bottom
# Return a rectangle that is dh, dv inside another
#
def inset(((left, top), (right, bottom)), (dh, dv)):
left = left + dh
top = top + dv
right = right - dh
bottom = bottom - dv
r = (left, top), (right, bottom)
if is_empty(r):
return empty
else:
return r
# Conversions between rectangles and 'geometry tuples',
# given as origin (h, v) and dimensions (width, height).
#
def rect2geom((left, top), (right, bottom)):
return (left, top), (right-left, bottom-top)
def geom2rect((h, v), (width, height)):
return (h, v), (h+width, v+height)
| 22.733333
| 68
| 0.63783
|
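A worked example for the rect module above, written as a sketch in the module's Python 2-era dialect; the expected values follow directly from intersect() and union() as defined:

import rect  # the module above; importable only under Python 2 because of its old syntax

a = ((0, 0), (4, 4))
b = ((2, 2), (6, 6))
assert rect.intersect([a, b]) == ((2, 2), (4, 4))
assert rect.union([a, b]) == ((0, 0), (6, 6))
assert rect.intersect([a, ((5, 5), (6, 6))]) == rect.empty  # disjoint, normalized to ((0, 0), (0, 0))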
8ca521b9a52772c8511a43f80e8ea1d82f13c60b
| 2,427
|
py
|
Python
|
shadowray/core/manager.py
|
shunf4/Shadowray
|
3ec2e69a9b079e051983f7d84252ba787ce933a2
|
[
"MIT"
] | null | null | null |
shadowray/core/manager.py
|
shunf4/Shadowray
|
3ec2e69a9b079e051983f7d84252ba787ce933a2
|
[
"MIT"
] | null | null | null |
shadowray/core/manager.py
|
shunf4/Shadowray
|
3ec2e69a9b079e051983f7d84252ba787ce933a2
|
[
"MIT"
] | null | null | null |
from shadowray.subscribe.parser import Parser
from shadowray.core.server import Server
from shadowray.core.execute import Execute
from shadowray.config.v2ray import SERVER_KEY_FROM_ORIGINAL, SERVER_KEY_FROM_SUBSCRIBE
import json
class Manager:
def __init__(self, subscribe_file_name=None, server_file_name=None, binary=None, template_file_name=None):
if subscribe_file_name is not None:
self.__subscribe = Parser(filename=subscribe_file_name, template=template_file_name)
if server_file_name is not None:
self.__server = Server(filename=server_file_name)
if binary is not None:
self.__execute = Execute(binary=binary)
def add_subscribe(self, name, url):
self.__subscribe.add(name, url)
def get_subscribe(self):
return self.__subscribe.subscribes
def update_subscribe(self, show_info=False, **kwargs):
self.__subscribe.update(show_info=show_info, **kwargs)
self.__server.clear(SERVER_KEY_FROM_SUBSCRIBE)
s = self.__subscribe.get_servers()
for i in s:
self.__server.add(protocol=i['protocol'], config=i['config'], ps=i['ps'], key=SERVER_KEY_FROM_SUBSCRIBE,
host=i['host'])
def rm_subscribe(self, name):
self.__subscribe.delete(name)
def show_servers(self):
servers = self.__server.get_servers()
count = 0
for s in servers[SERVER_KEY_FROM_ORIGINAL]:
count += 1
print(str(count) + " ---- " + s['ps'] + " ---- " + s['protocol'])
for s in servers[SERVER_KEY_FROM_SUBSCRIBE]:
count += 1
print(str(count) + " ---- " + s['ps'] + " ---- " + s['protocol'])
def proxy(self, index=None, config=None, daemon=False):
if config is not None:
self.__execute.exec(json.dumps(config), daemon=daemon)
elif index is not None:
self.__execute.exec(json.dumps(self.__server.get_config(index)), daemon=daemon)
def save(self):
self.__server.save()
self.__subscribe.save()
def save_servers(self):
self.__server.save()
def save_subscribe(self):
self.__subscribe.save()
def get_server(self, index):
return self.__server.get_server(index)
@property
def server_number(self):
return self.__server.original_servers_number + self.__server.subscribe_servers_number
| 33.246575
| 116
| 0.653482
|
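A hypothetical wiring sketch for the Manager class above; the file paths and the v2ray binary location are placeholders, and the on-disk formats expected by Parser and Server are not shown in this row:

from shadowray.core.manager import Manager

m = Manager(subscribe_file_name='subscribe.json',
            server_file_name='servers.json',
            binary='/usr/local/bin/v2ray',
            template_file_name='template.json')
m.update_subscribe(show_info=True)  # pull servers from every subscription into the server list
m.show_servers()                    # prints "<n> ---- <ps> ---- <protocol>" per server
m.proxy(index=1)                    # run the binary with the config of server #1
m.save()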
7bcd6502017ea1fcbcc47a5cea3faf9176fdc200
| 13,650
|
py
|
Python
|
docker/storperf-master/rest_server.py
|
hashnfv/hashnfv-storperf
|
9eebe429ae9ec58a593611063da5b541634f8932
|
[
"Apache-2.0"
] | null | null | null |
docker/storperf-master/rest_server.py
|
hashnfv/hashnfv-storperf
|
9eebe429ae9ec58a593611063da5b541634f8932
|
[
"Apache-2.0"
] | null | null | null |
docker/storperf-master/rest_server.py
|
hashnfv/hashnfv-storperf
|
9eebe429ae9ec58a593611063da5b541634f8932
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright (c) 2015 EMC and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import json
import logging.config
import os
import sys
from flask import abort, Flask, request, jsonify
from flask_cors import CORS
from flask_restful import Resource, Api, fields
from flask_restful_swagger import swagger
from storperf.storperf_master import StorPerfMaster
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /storperf/ {
proxy_pass http://localhost:8085/;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /storperf;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
app = Flask(__name__, static_url_path="")
CORS(app)
api = swagger.docs(Api(app), apiVersion='1.0')
app.wsgi_app = ReverseProxied(app.wsgi_app)
storperf = StorPerfMaster()
class Logs(Resource):
def __init__(self):
self.logger = logging.getLogger(__name__)
@swagger.operation(
notes="Fetch logs",
parameters=[
{
"name": "lines",
"description": "The number of lines to fetch",
"required": "False",
"type": "string",
"allowedMultiple": "False",
"paramType": "query"
}
]
)
def get(self):
lines = request.args.get('lines')
if lines:
try:
lines = int(lines)
except Exception:
pass
else:
lines = 35
return jsonify({'logs': storperf.get_logs(lines)})
@swagger.model
class ConfigurationRequestModel:
resource_fields = {
'agent_count': fields.Integer,
'agent_flavor': fields.String,
'agent_image': fields.String,
'public_network': fields.String,
'volume_size': fields.Integer,
'availability_zone': fields.String,
'username': fields.String,
'password': fields.String
}
@swagger.model
class ConfigurationResponseModel:
resource_fields = {
'agent_count': fields.Integer,
'agent_flavor': fields.String,
'agent_image': fields.String,
'public_network': fields.String,
'stack_created': fields.Boolean,
'stack_id': fields.String,
'volume_size': fields.Integer,
'availability_zone': fields.String
}
class Configure(Resource):
"""Configuration API"""
def __init__(self):
self.logger = logging.getLogger(__name__)
@swagger.operation(
notes='Fetch the current agent configuration',
type=ConfigurationResponseModel.__name__
)
def get(self):
return jsonify({'agent_count': storperf.agent_count,
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
'public_network': storperf.public_network,
'volume_size': storperf.volume_size,
'stack_created': storperf.is_stack_created,
'availability_zone': storperf.availability_zone,
'stack_id': storperf.stack_id})
@swagger.operation(
notes='''Set the current agent configuration and create a stack in
the controller. Returns once the stack create is completed.''',
parameters=[
{
"name": "configuration",
"description": '''Configuration to be set. All parameters are
optional, and will retain their previous value if not
specified. Volume size is in GB.
''',
"required": True,
"type": "ConfigurationRequestModel",
"paramType": "body"
}
],
type=ConfigurationResponseModel.__name__
)
def post(self):
if not request.json:
abort(400, "ERROR: No data specified")
try:
if ('agent_count' in request.json):
storperf.agent_count = request.json['agent_count']
if ('agent_flavor' in request.json):
storperf.agent_flavor = request.json['agent_flavor']
if ('agent_image' in request.json):
storperf.agent_image = request.json['agent_image']
if ('public_network' in request.json):
storperf.public_network = request.json['public_network']
if ('volume_size' in request.json):
storperf.volume_size = request.json['volume_size']
if ('availability_zone' in request.json):
                storperf.availability_zone = request.json['availability_zone']
if ('username' in request.json):
storperf.username = request.json['username']
if ('password' in request.json):
storperf.password = request.json['password']
storperf.create_stack()
if storperf.stack_id is None:
abort(400, storperf.status_reason)
return jsonify({'agent_count': storperf.agent_count,
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
'public_network': storperf.public_network,
'volume_size': storperf.volume_size,
'availability_zone': storperf.availability_zone,
'stack_id': storperf.stack_id})
except Exception as e:
abort(400, str(e))
@swagger.operation(
notes='Deletes the agent configuration and the stack'
)
def delete(self):
try:
storperf.delete_stack()
except Exception as e:
abort(400, str(e))
@swagger.model
class WorkloadModel:
resource_fields = {
'target': fields.String,
'deadline': fields.Integer,
"steady_state_samples": fields.Integer,
'workload': fields.String,
'queue_depths': fields.String,
'block_sizes': fields.String
}
@swagger.model
class WorkloadResponseModel:
resource_fields = {
'job_id': fields.String
}
class Job(Resource):
"""Job API"""
def __init__(self):
self.logger = logging.getLogger(__name__)
@swagger.operation(
notes='Fetch the metrics of the specified workload',
parameters=[
{
"name": "id",
"description": "The UUID of the workload in the format "
"NNNNNNNN-NNNN-NNNN-NNNN-NNNNNNNNNNNN",
"required": False,
"type": "string",
"allowMultiple": False,
"paramType": "query"
},
{
"name": "type",
"description": "The type of metrics to report. May be "
"metrics (default), metadata, or status",
"required": False,
"type": "string",
"allowMultiple": False,
"paramType": "query"
}
],
responseMessages=[
{
"code": 200,
"message": "Workload ID found, response in JSON format"
},
{
"code": 404,
"message": "Workload ID not found"
}
]
)
def get(self):
workload_id = request.args.get('id')
if workload_id:
metrics_type = "metrics"
if request.args.get('type'):
metrics_type = request.args.get('type')
if metrics_type == "metrics":
return jsonify(storperf.fetch_results(workload_id))
if metrics_type == "metadata":
return jsonify(storperf.fetch_metadata(workload_id))
if metrics_type == "status":
return jsonify(storperf.fetch_job_status(workload_id))
else:
metrics_type = None
if request.args.get('type'):
metrics_type = request.args.get('type')
if metrics_type == "status":
return jsonify(storperf.fetch_job_status(workload_id))
else:
return jsonify(storperf.fetch_all_jobs(metrics_type))
@swagger.operation(
parameters=[
{
"name": "body",
"description": """Start execution of a workload with the
following parameters:
"target": The target device to profile",
"deadline": if specified, the maximum duration in minutes
for any single test iteration.
"workload":if specified, the workload to run. Defaults to all.
""",
"required": True,
"type": "WorkloadModel",
"paramType": "body"
}
],
type=WorkloadResponseModel.__name__,
responseMessages=[
{
"code": 200,
"message": "Job submitted"
},
{
"code": 400,
"message": "Missing configuration data"
}
]
)
def post(self):
if not request.json:
abort(400, "ERROR: Missing configuration data")
self.logger.info(request.json)
try:
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
storperf.deadline = request.json['deadline']
if ('steady_state_samples' in request.json):
storperf.steady_state_samples = request.json[
'steady_state_samples']
if ('queue_depths' in request.json):
storperf.queue_depths = request.json['queue_depths']
if ('block_sizes' in request.json):
storperf.block_sizes = request.json['block_sizes']
if ('workload' in request.json):
storperf.workloads = request.json['workload']
else:
storperf.workloads = None
if ('metadata' in request.json):
metadata = request.json['metadata']
else:
metadata = {}
job_id = storperf.execute_workloads(metadata)
return jsonify({'job_id': job_id})
except Exception as e:
abort(400, str(e))
@swagger.operation(
notes='Cancels the currently running workload',
responseMessages=[
{
"code": 200,
"message": "Wordload ID found, response in JSON format"
},
]
)
def delete(self):
self.logger.info("Threads: %s" % sys._current_frames())
print sys._current_frames()
try:
return jsonify({'Slaves': storperf.terminate_workloads()})
except Exception as e:
abort(400, str(e))
@swagger.model
class QuotaModel:
resource_fields = {
'quota': fields.Integer
}
class Quota(Resource):
"""Quota API"""
@swagger.operation(
notes='''Fetch the current Cinder volume quota. This value limits
the number of volumes that can be created, and by extension, defines
the maximum number of agents that can be created for any given test
scenario''',
type=QuotaModel.__name__
)
def get(self):
quota = storperf.volume_quota
return jsonify({'quota': quota})
def setup_logging(default_path='logging.json',
default_level=logging.INFO, env_key='LOG_CFG'):
"""Setup logging configuration
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
socketHandler = logging.handlers.DatagramHandler(
'localhost', logging.handlers.DEFAULT_UDP_LOGGING_PORT)
rootLogger = logging.getLogger('')
rootLogger.addHandler(socketHandler)
api.add_resource(Configure, "/api/v1.0/configurations")
api.add_resource(Quota, "/api/v1.0/quotas")
api.add_resource(Job, "/api/v1.0/jobs")
api.add_resource(Logs, "/api/v1.0/logs")
if __name__ == "__main__":
setup_logging()
logging.getLogger("storperf").setLevel(logging.DEBUG)
app.run(host='0.0.0.0', debug=True, threaded=True)
| 31.524249
| 78
| 0.56359
|
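A client-side sketch for the REST endpoints registered above (/api/v1.0/configurations, /jobs, /quotas, /logs). The base URL is an assumption, since app.run() above does not pin a port, and the payload values are illustrative; the field names come from ConfigurationRequestModel and WorkloadModel:

import requests

BASE = 'http://127.0.0.1:5000/api/v1.0'  # assumed host and port

# Create (or update) the agent stack; all configuration fields are optional.
requests.post(BASE + '/configurations',
              json={'agent_count': 2, 'volume_size': 10, 'public_network': 'ext-net'})

# Submit a workload, then poll its status by job id.
job = requests.post(BASE + '/jobs',
                    json={'target': '/dev/vdb', 'deadline': 10,
                          'queue_depths': '8', 'block_sizes': '4096'}).json()
status = requests.get(BASE + '/jobs',
                      params={'id': job['job_id'], 'type': 'status'}).json()
print(status)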
181348b4483ee54040ad4c3e5897744b41b42249
| 451
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/tf/swish_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 1
|
2021-02-01T06:35:55.000Z
|
2021-02-01T06:35:55.000Z
|
tools/mo/openvino/tools/mo/front/tf/swish_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 58
|
2020-11-06T12:13:45.000Z
|
2022-03-28T13:20:11.000Z
|
tools/mo/openvino/tools/mo/front/tf/swish_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 2
|
2021-07-14T07:40:50.000Z
|
2021-07-27T01:40:03.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.activation_ops import Swish
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.graph.graph import Node
class SwishExtractor(FrontExtractorOp):
op = 'swish_f32'
enabled = True
@classmethod
def extract(cls, node: Node):
Swish.update_node_stat(node, {})
return cls.enabled
| 25.055556
| 62
| 0.740576
|
bddcb5c34aed1455d3e0cd157bde46c4233e583b
| 3,607
|
py
|
Python
|
account/views.py
|
RichardLeeH/invoce_sys
|
42a6f5750f45b25e0d7282114ccb7f9f72ee1761
|
[
"Apache-2.0"
] | null | null | null |
account/views.py
|
RichardLeeH/invoce_sys
|
42a6f5750f45b25e0d7282114ccb7f9f72ee1761
|
[
"Apache-2.0"
] | null | null | null |
account/views.py
|
RichardLeeH/invoce_sys
|
42a6f5750f45b25e0d7282114ccb7f9f72ee1761
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.hashers import make_password
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.views import APIView
from rest_framework.response import Response
from django.db import IntegrityError
from django.core.cache import cache
from django.contrib.auth import get_user_model, login, authenticate
from account.models import Profile
from account.serializers import UserSerializer
class RegisterView(APIView):
def post(self, request, *args, **kwargs):
idcard = self.request.data["idcard"]
phone = self.request.data["phone"]
password = self.request.data["password"]
rpt_password = self.request.data["rpt_password"]
valid_code = self.request.data["valid_code"]
if password != rpt_password:
return Response({"detail": "密码不相同"},
status=status.HTTP_400_BAD_REQUEST)
User = get_user_model()
cache_key = "code_%s_%s" % ("register", phone)
cache_vcode = cache.get(cache_key)
if str(valid_code) != "1984" and valid_code != str(cache_vcode):
return Response({"detail": "验证码不符,请重新输入"},
status=status.HTTP_400_BAD_REQUEST)
try:
            new_user = User(username=phone,  # phone serves as the unique username, avoiding duplicates
password=make_password(password))
new_user.save()
profile_obj = Profile(user=new_user,
idcard=idcard)
profile_obj.save()
Token.objects.get_or_create(user=new_user)
except IntegrityError:
return Response({"detail": "注册帐户已存在, 请直接登录"},
status=status.HTTP_400_BAD_REQUEST)
return Response({"detail": "注册成功"}, status=status.HTTP_201_CREATED)
class LoginView(APIView):
def post(self, request, *args, **kwargs):
phone = self.request.data["phone"]
password = self.request.data["password"]
user = authenticate(username=phone, password=password)
if user is not None:
if user.is_active:
login(request, user)
token, _ = Token.objects.get_or_create(user=user)
profile = UserSerializer(user)
result = {"detail": "登录成功", "token": token.key}
result.update(profile.data)
return Response(result)
else:
return Response({"detail": "用户被禁用,请联系管理员"},
status=status.HTTP_403_FORBIDDEN)
return Response({"detail": "用户名或密码错误"},
status=status.HTTP_400_BAD_REQUEST)
class ResetPasswordView(APIView):
def post(self, request, *args, **kwargs):
phone = self.request.data["phone"]
valid_code = self.request.data["valid_code"]
password = self.request.data["password"]
cache_key = "code_%s_%s" % ("reset_pwd", phone)
cache_code = cache.get(cache_key)
if str(valid_code) != "1984" and valid_code != str(cache_code):
return Response({"detail": "验证码错误"},
status=status.HTTP_400_BAD_REQUEST)
User = get_user_model()
try:
user_obj = User.objects.get(username=phone)
except User.DoesNotExist:
return Response({"detail": "用户不存在"},
status=status.HTTP_404_NOT_FOUND)
user_obj.password = make_password(password)
user_obj.save()
return Response({"detail": "重置成功"})
| 41.45977
| 75
| 0.607984
|
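A request-payload sketch for the three views above. The URL paths are assumptions (the project's urls.py is not part of this row); the field names, the token in the login response, and the fixed "1984" test code come straight from the view code, while the phone and ID-card values are placeholders:

import requests

BASE = 'http://127.0.0.1:8000'  # assumed dev server and routes

requests.post(BASE + '/register', json={
    'phone': '13800000000',            # placeholder phone number
    'idcard': '110101199001011234',    # placeholder ID-card number
    'password': 's3cret',
    'rpt_password': 's3cret',
    'valid_code': '1984',              # RegisterView accepts this fixed test code
})

resp = requests.post(BASE + '/login', json={'phone': '13800000000', 'password': 's3cret'})
token = resp.json().get('token')       # presumably used as "Authorization: Token <token>" later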
093dd3f63c0998ffa39b086ac183fc08861c40d1
| 127
|
py
|
Python
|
form_monster/fields/str_field.py
|
jlrickert/form-monster
|
a0d424f45e27576810cc73356327e3096687b80a
|
[
"MIT"
] | null | null | null |
form_monster/fields/str_field.py
|
jlrickert/form-monster
|
a0d424f45e27576810cc73356327e3096687b80a
|
[
"MIT"
] | null | null | null |
form_monster/fields/str_field.py
|
jlrickert/form-monster
|
a0d424f45e27576810cc73356327e3096687b80a
|
[
"MIT"
] | null | null | null |
from .base import BaseField
class StrField(BaseField):
def set_value(self, value):
super().set_value(str(value))
| 18.142857
| 37
| 0.692913
|
c13f9952988d843a1bdb5cf7f2d5eff9af34fa03
| 19,599
|
py
|
Python
|
git-restore-mtime/git-restore-mtime-modified.py
|
TYPO3-Documentation/sphinxcontrib-gitloginfo
|
5f0ea2e3c69cde680f7431843e41ce598088f04c
|
[
"MIT"
] | null | null | null |
git-restore-mtime/git-restore-mtime-modified.py
|
TYPO3-Documentation/sphinxcontrib-gitloginfo
|
5f0ea2e3c69cde680f7431843e41ce598088f04c
|
[
"MIT"
] | 1
|
2021-11-28T10:28:16.000Z
|
2021-11-29T08:59:43.000Z
|
git-restore-mtime/git-restore-mtime-modified.py
|
TYPO3-Documentation/sphinxcontrib-gitloginfo
|
5f0ea2e3c69cde680f7431843e41ce598088f04c
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding: utf-8
#
# git-restore-mtime - Change mtime of files based on commit date of last change
#
# Copyright (C) 2012 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. See <http://www.gnu.org/licenses/gpl.html>
#
"""
Change the modification time (mtime) of all files in work tree, based on the
date of the most recent commit that modified the file.
Useful prior to generating release tarballs, so each file is archived with a
date that is similar to the date when the file was actually last modified,
assuming the actual modification date and its commit date are close.
By default ignores all ignored and untracked files, and also refuses to work
on trees with uncommitted changes.
"""
# TODO:
# - Add -z on git whatchanged/ls-files so we don't deal with filename decode/OS normalization
# - When Python is bumped to 3.7, use text instead of universal_newlines on subprocess
# - Update "Statistics for some large projects" with modern hardware and repositories.
# - Create a README.md for git-restore-mtime alone. It deserves extensive documentation
# - Move Statistics there
# FIXME:
# - When current dir is outside the worktree, e.g. using --work-tree, `git ls-files`
# assume any relative pathspecs are to worktree root, not the current dir. As such,
# relative pathspecs may not work.
# - Renames and mode changes should not change file mtime:
# - Must check on status 'R100' and mode changes with same blobs
# - Should require status to be (A, C, M, R<100, T). D will never be processed as
# filelist is a subset of lsfileslist.
# - Check file (A, D) for directory mtime is not sufficient:
# - Renames also change dir mtime, unless rename was on a parent dir
# - If most recent change of all files in a dir was a [M]odification,
# dir might not be touched at all.
# - Dirs containing only subdirectories but no direct files will also
# not be touched. They're files' [grand]parent dir, but never their dirname().
# - Some solutions:
# - After files done, perform some dir processing for missing dirs, finding latest
# file (A, D, R)
# - Simple approach: dir mtime is most recent child (dir or file) mtime
# - Use a virtual concept of "created at most at" to fill missing info, bubble up
# to parents and grandparents
# - When handling [grand]parent dirs, stay inside <pathspec>
# - Better handling of merge commits. `-m` is plain *wrong*. `-c/--cc` is perfect, but
# painfully slow. First pass without merge commits is not accurate. Maybe add a new
# `--accurate` mode for `--cc`?
from __future__ import absolute_import
if __name__ != "__main__":
raise ImportError("{} should not be used as a module.".format(__name__))
import argparse
import io
import json
import logging
import os
import os.path
import six
import shlex
import subprocess
import sys
import time
from six.moves import range
# Update symlinks only if the OS supports not following them
UPDATE_SYMLINKS = bool(os.utime in getattr(os, 'supports_follow_symlinks', []))
STEPMISSING = 100
# Command-line interface ######################################################
def parse_args():
parser = argparse.ArgumentParser(
description="""Restore original modification time of files based on the date of the
most recent commit that modified them. Useful when generating release tarballs.""")
group = parser.add_mutually_exclusive_group()
group.add_argument('--quiet', '-q', dest='loglevel',
action="store_const", const=logging.WARNING, default=logging.INFO,
help="Suppress informative messages and summary statistics.")
group.add_argument('--verbose', '-v', action="count",
help="Print additional information for each processed file.")
parser.add_argument('--force', '-f', action="store_true",
help="Force execution on trees with uncommitted changes.")
parser.add_argument('--merge', '-m', action="store_true",
help="""Include merge commits. Leads to more recent mtimes and more files per
commit, thus with the same mtime (which may or may not be what you want). Including
merge commits may lead to less commits being evaluated (all files are found sooner),
which improves performance, sometimes substantially. But since merge commits are
usually huge, processing them may also take longer, sometimes substantially.
By default merge logs are only used for files missing from regular commit logs.""")
parser.add_argument('--first-parent', action="store_true",
help="""Consider only the first parent, the "main branch", when parsing merge
commit logs. Only effective when merge commits are included in the log, either
by --merge or to find missing files after first log parse. See --skip-missing.""")
parser.add_argument('--skip-missing', '-s',
action="store_false", default=True, dest="missing",
help="""Do not try to find missing files. If some files were not found in regular
commit logs, by default it re-tries using merge commit logs for these files (if
--merge was not already used). This option disables this behavior, which may slightly
improve performance, but files found only in merge commits will not be updated.""")
parser.add_argument('--no-directories', '-D',
action="store_false", default=True, dest='dirs',
help="""Do not update directory mtime for files created, renamed or deleted in it.
Note: just modifying a file will not update its directory mtime.""")
parser.add_argument('--test', '-t', action="store_true", default=False,
help="Test run: do not actually update any file")
parser.add_argument('--commit-time', '-c',
action='store_true', default=False, dest='commit_time',
help="Use commit time instead of author time")
parser.add_argument('pathspec', nargs='*', metavar='PATH',
help="""Only modify paths matching PATH, directories or files, relative to current
directory. Default is to modify all files handled by git, ignoring untracked files
and submodules.""")
parser.add_argument('--work-tree', dest='workdir',
help="Path to the work tree, if not current directory or one of its parents.")
parser.add_argument('--git-dir', dest='gitdir',
help="Path to the git repository, if not the default <work-tree-root>/.git")
parser.add_argument('--skip-older-than', metavar='SECONDS', type=int,
help="""Do not modify files that are older than %(metavar)s.
It can significantly improve performance if fewer files are processed.
Useful on CI builds, which can eventually switch workspace to different branch,
but mostly performs builds on the same one (e.g. master).
""")
parser.add_argument('--destfile-gitloginfo', dest='gitloginfo',
help="""Full pathname of an json outfile that will be created containing
information obtained from 'git log'""")
return parser.parse_args()
# Helper functions ############################################################
def setup_logging(args):
TRACE = logging.DEBUG // 2
logging.Logger.trace = lambda _, m, *a, **k: _.log(TRACE, m, *a, **k)
level = (args.verbose and max(TRACE, logging.DEBUG // args.verbose)) or args.loglevel
logging.basicConfig(level=level, format='%(message)s')
return logging.getLogger()
def normalize(path):
"""Normalize paths from git, handling non-ASCII characters.
Git for Windows, as of v1.7.10, stores paths as UTF-8 normalization form C. If path
contains non-ASCII or non-printable chars it outputs the UTF-8 in octal-escaped
notation, double-quoting the whole path. Double-quotes and backslashes are also escaped.
https://git-scm.com/docs/git-config#Documentation/git-config.txt-corequotePath
https://github.com/msysgit/msysgit/wiki/Git-for-Windows-Unicode-Support
https://github.com/git/git/blob/master/Documentation/i18n.txt
Example on git output, this function reverts this:
r'back\slash_double"quote_açaí' -> r'"back\\slash_double\"quote_a\303\247a\303\255"'
"""
if path and path[0] == '"':
# Python 2: path = path[1:-1].decode("string-escape")
# Python 3: https://stackoverflow.com/a/46650050/624066
path = (path[1:-1] # Remove enclosing double quotes
.encode('latin1') # Convert to bytes, required 'unicode-escape'
.decode('unicode-escape') # Perform the actual octal-escaping decode
.encode('latin1') # 1:1 mapping to bytes, forming UTF-8 encoding
.decode('utf8')) # Decode from UTF-8
# Make sure the slash matches the OS; for Windows we need a backslash
return os.path.normpath(path)
jsondata = {}
filedata = {}
if UPDATE_SYMLINKS:
def touch(path, mtime, test=False, commit_hash=None, repo_path=None):
"""The actual mtime update"""
filedata[repo_path] = (mtime, commit_hash)
if test: return
os.utime(path, (mtime, mtime), follow_symlinks=False)
else:
def touch(path, mtime, test=False, commit_hash=None, repo_path=None):
"""The actual mtime update"""
filedata[repo_path] = (mtime, commit_hash)
if test: return
os.utime(path, (mtime, mtime))
def isodate(secs):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(secs))
# Git class and parselog(), the heart of the script ###########################
class Git():
def __init__(self, workdir=None, gitdir=None):
self.gitcmd = ['git']
if workdir: self.gitcmd.extend(('--work-tree', workdir))
if gitdir : self.gitcmd.extend(('--git-dir', gitdir))
self.workdir, self.gitdir = self._repodirs()
def ls_files(self, pathlist=None):
return (normalize(_) for _ in self._run('ls-files --full-name', pathlist))
def is_dirty(self):
result = self._run('diff --no-ext-diff --quiet', output=False)
return bool(result)
def log(self, merge=False, first_parent=False, commit_time=False, pathlist=None):
cmd = 'log --raw --pretty="{}"'.format('%ct %H' if commit_time else '%at %H')
if merge: cmd += ' -m'
if first_parent: cmd += ' --first-parent'
return self._run(cmd, pathlist)
def _repodirs(self):
return (os.path.normpath(_) for _ in
self._run('rev-parse --show-toplevel --absolute-git-dir', check=True))
def _run(self, cmdstr, pathlist=None, output=True, check=False):
cmdlist = self.gitcmd + shlex.split(cmdstr)
if pathlist:
cmdlist.append('--')
cmdlist.extend(pathlist)
log.trace("Executing: %s", ' '.join(cmdlist))
if not output:
return subprocess.call(cmdlist)
if check:
try:
stdout = subprocess.check_output(cmdlist, universal_newlines=True)
return stdout.splitlines()
except subprocess.CalledProcessError as e:
if six.PY3:
raise self.Error(e.returncode, e.cmd, e.output, e.stderr)
else:
raise self.Error(e.returncode, e.cmd, e.output)
self.proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, universal_newlines=True)
return (_.strip() for _ in self.proc.stdout)
class Error(subprocess.CalledProcessError): pass
def parselog(filelist, dirlist, stats, git, merge=False, filterlist=None):
mtime = 0
for line in git.log(merge, args.first_parent, args.commit_time, filterlist):
stats['loglines'] += 1
# Blank line between Date and list of files
if not line: continue
# File line
if line[0] == ':': # Faster than line.startswith(':')
# If line describes a rename, linetok has three tokens, otherwise two
linetok = line.split('\t')
status = linetok[0]
file = linetok[-1]
# Handles non-ASCII chars and OS path separator
file = normalize(file)
if file in filelist:
stats['files'] -= 1
log.debug("%d\t%d\t%d\t%s\t%s",
stats['loglines'], stats['commits'], stats['files'],
isodate(mtime), file)
filelist.remove(file)
try:
touch(os.path.join(git.workdir, file), mtime, args.test, commit_hash, file)
stats['touches'] += 1
except Exception as e:
log.error("ERROR: %s", e)
stats['errors'] += 1
if args.dirs:
dirname = os.path.dirname(file)
if status[-1] in ('A', 'D') and dirname in dirlist:
log.debug("%d\t%d\t-\t%s\t%s",
stats['loglines'], stats['commits'],
isodate(mtime), "{}/".format(dirname or '.'))
dirlist.remove(dirname)
try:
touch(os.path.join(git.workdir, dirname), mtime, args.test, commit_hash, dirname)
stats['dirtouches'] += 1
except Exception as e:
log.error("ERROR: %s", e)
stats['direrrors'] += 1
# Date line
else:
stats['commits'] += 1
parts = line.split()
mtime = int(parts[0])
commit_hash = parts[1]
# All files done?
if not stats['files']:
git.proc.terminate() # hackish, but does the job. Not needed anyway
return
# Main Logic ##################################################################
def main():
start = time.time() # yes, Wall time. CPU time is not realistic for users.
stats = {_: 0 for _ in ('loglines', 'commits', 'touches', 'errors', 'dirtouches', 'direrrors')}
# First things first: Where and Who are we?
try:
git = Git(args.workdir, args.gitdir)
except Git.Error as e:
# Not in a git repository, and git already informed user on stderr. So we just...
return e.returncode
# /path/to/repo/.git
jsondata['abspath_to_gitdir'] = git.gitdir
# /path/to/repo/and/in/there/the/project
jsondata['abspath_to_project'] = os.path.normpath(os.getcwd())
# /path/to/repo
jsondata['abspath_to_repo'] = git.workdir
# filedata = {'gitlogfilename': (timestamp, commit_hash), …}
jsondata['filedata'] = filedata
# Do not work on dirty repositories, unless --force
if not args.force and git.is_dirty():
log.critical(
"ERROR: There are local changes in the working directory.\n"
"This could lead to undesirable results for modified files.\n"
"Please, commit your changes (or use --force) and try again.\n"
"Aborting")
return 1
# Get the files managed by git and build file and dir list to be processed
filelist = set()
dirlist = set()
if UPDATE_SYMLINKS and not args.skip_older_than:
filelist = set(git.ls_files(args.pathspec))
dirlist = set(os.path.dirname(_) for _ in filelist)
else:
for path in git.ls_files(args.pathspec):
fullpath = os.path.join(git.workdir, path)
# Symlink (to file, to dir or broken - git handles the same way)
if not UPDATE_SYMLINKS and os.path.islink(fullpath):
log.warning("WARNING: Skipping symlink, OS does not support update: %s", path)
continue
# skip files which are older than given threshold
if args.skip_older_than and start - os.path.getmtime(fullpath) > args.skip_older_than:
continue
# Always add them relative to worktree root
filelist.add(path)
dirlist.add(os.path.dirname(path))
stats['totalfiles'] = stats['files'] = len(filelist)
log.info("{0:,} files to be processed in work dir".format(stats['totalfiles']))
if not filelist:
# Nothing to do. Exit silently and without errors, just like git does
return
# Process the log until all files are 'touched'
log.debug("Line #\tLog #\tF.Left\tModification Time\tFile Name")
parselog(filelist, dirlist, stats, git, args.merge, args.pathspec)
# Missing files
if filelist:
# Try to find them in merge logs, if not done already
# (usually HUGE, thus MUCH slower!)
if args.missing and not args.merge:
filterlist = list(filelist)
for i in range(0, len(filterlist), STEPMISSING):
parselog(filelist, dirlist, stats, git,
merge=True, filterlist=filterlist[i:i+STEPMISSING])
# Still missing some?
for file in filelist:
log.warning("WARNING: not found in log: %s", file)
# Final statistics
# Suggestion: use git-log --before=mtime to brag about skipped log entries
log.info(
"Statistics:\n"
"{:13,.2f} seconds\n"
"{:13,} log lines processed\n"
"{:13,} commits evaluated"
"".format(time.time()-start, stats['loglines'], stats['commits']))
if args.dirs:
if stats['direrrors']: log.info("{:13,} directory update errors".format(stats['direrrors']))
log.info("{:13,} directories updated".format(stats['dirtouches']))
if stats['touches'] != stats['totalfiles']: log.info("{:13,} files".format(stats['totalfiles']))
if stats['files']: log.info("{:13,} files missing".format(stats['files']))
if stats['errors']: log.info("{:13,} file update errors".format(stats['errors']))
log.info("{:13,} files updated".format(stats['touches']))
if args.test:
log.info("TEST RUN - No files modified!")
if 1:
log.info("running in: " + os.getcwd())
if 0 and __name__ == "__main__" and 'example call':
os.chdir('~/Repositories/git.typo3.org/Packages/TYPO3.CMS.git/typo3/sysext/dashboard')
sys.argv[1:] = [
# '--help',
'--test',
'--verbose',
'--no-directories',
'--destfile-gitloginfo=%s' % (os.path.split(__file__)[0] + '/temp.json'),
'.',
]
args = parse_args()
log = setup_logging(args)
log.trace("Arguments: %s", args)
# UI done, it's show time!
try:
exitcode = main()
if not exitcode and args.gitloginfo:
#https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence
log.info("Creating gigloginfo file: '%s'" % args.gitloginfo)
filemode = 'wb' if six.PY2 else 'w'
with io.open(args.gitloginfo, filemode) as jsonfile:
json.dump(jsondata, jsonfile, sort_keys=True, indent=0)
sys.exit(exitcode)
except KeyboardInterrupt:
log.info("Aborting")
sys.exit(-1)
| 42.5141
| 119
| 0.630542
|
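A small consumer sketch for the --destfile-gitloginfo JSON written by the script above; the keys mirror what main() stores in jsondata, and 'temp.json' is only the placeholder name used in the script's own example call:

import json
import time

with open('temp.json') as f:
    info = json.load(f)

print(info['abspath_to_repo'])
# filedata maps a repo-relative path to (mtime, commit_hash); JSON turns the tuple into a list.
for path, (mtime, commit_hash) in sorted(info['filedata'].items()):
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mtime)), commit_hash[:8], path)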
56cb473d447cf27cf404ad066802bd327d84d3c4
| 630
|
py
|
Python
|
build/vesc/vesc_msgs/cmake/vesc_msgs-genmsg-context.py
|
diogolopes18-cyber/F1Tenth_CISTER_Integration
|
06ff104e1e248e63bf8e433ccd4beea516ebd37b
|
[
"MIT"
] | null | null | null |
build/vesc/vesc_msgs/cmake/vesc_msgs-genmsg-context.py
|
diogolopes18-cyber/F1Tenth_CISTER_Integration
|
06ff104e1e248e63bf8e433ccd4beea516ebd37b
|
[
"MIT"
] | null | null | null |
build/vesc/vesc_msgs/cmake/vesc_msgs-genmsg-context.py
|
diogolopes18-cyber/F1Tenth_CISTER_Integration
|
06ff104e1e248e63bf8e433ccd4beea516ebd37b
|
[
"MIT"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/diogo/sims_ws/src/vesc/vesc_msgs/msg/VescState.msg;/home/diogo/sims_ws/src/vesc/vesc_msgs/msg/VescStateStamped.msg"
services_str = ""
pkg_name = "vesc_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "vesc_msgs;/home/diogo/sims_ws/src/vesc/vesc_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 52.5
| 137
| 0.784127
|
ff1cad48a9f0c07bb102e473de29811e3fb93980
| 3,958
|
py
|
Python
|
metadata-ingestion/examples/library/dataset_add_column_term.py
|
cuong-pham/datahub
|
cb4eb001758f55622add0f4dc3650cf483609cba
|
[
"Apache-2.0"
] | 1,603
|
2016-03-03T17:21:03.000Z
|
2020-01-22T22:12:02.000Z
|
metadata-ingestion/examples/library/dataset_add_column_term.py
|
cuong-pham/datahub
|
cb4eb001758f55622add0f4dc3650cf483609cba
|
[
"Apache-2.0"
] | 1,157
|
2016-03-03T19:29:22.000Z
|
2020-01-20T14:41:59.000Z
|
metadata-ingestion/examples/library/dataset_add_column_term.py
|
cuong-pham/datahub
|
cb4eb001758f55622add0f4dc3650cf483609cba
|
[
"Apache-2.0"
] | 570
|
2016-03-03T17:21:05.000Z
|
2020-01-21T06:54:10.000Z
|
import logging
import time
from datahub.emitter.mce_builder import make_dataset_urn, make_term_urn
from datahub.emitter.mcp import MetadataChangeProposalWrapper
# read-modify-write requires access to the DataHubGraph (RestEmitter is not enough)
from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph
# Imports for metadata model classes
from datahub.metadata.schema_classes import (
AuditStampClass,
ChangeTypeClass,
EditableSchemaFieldInfoClass,
EditableSchemaMetadataClass,
GlossaryTermAssociationClass,
GlossaryTermsClass,
)
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_simple_field_path_from_v2_field_path(field_path: str) -> str:
"""A helper function to extract simple . path notation from the v2 field path"""
if field_path.startswith("[version=2.0]"):
# this is a v2 field path
tokens = [
t
for t in field_path.split(".")
if not (t.startswith("[") or t.endswith("]"))
]
path = ".".join(tokens)
return path
else:
# not a v2, we assume this is a simple path
return field_path
# Inputs -> the column, dataset and the term to set
column = "address.zipcode"
dataset_urn = make_dataset_urn(platform="hive", name="realestate_db.sales", env="PROD")
term_to_add = make_term_urn("Classification.Location")
# First we get the current editable schema metadata
gms_endpoint = "http://localhost:8080"
graph = DataHubGraph(DatahubClientConfig(server=gms_endpoint))
current_editable_schema_metadata = graph.get_aspect_v2(
entity_urn=dataset_urn,
aspect="editableSchemaMetadata",
aspect_type=EditableSchemaMetadataClass,
)
# Some pre-built objects to help all the conditional pathways
now = int(time.time() * 1000) # milliseconds since epoch
current_timestamp = AuditStampClass(time=now, actor="urn:li:corpuser:ingestion")
term_association_to_add = GlossaryTermAssociationClass(urn=term_to_add)
term_aspect_to_set = GlossaryTermsClass(
terms=[term_association_to_add], auditStamp=current_timestamp
)
field_info_to_set = EditableSchemaFieldInfoClass(
fieldPath=column, glossaryTerms=term_aspect_to_set
)
need_write = False
field_match = False
if current_editable_schema_metadata:
for fieldInfo in current_editable_schema_metadata.editableSchemaFieldInfo:
if get_simple_field_path_from_v2_field_path(fieldInfo.fieldPath) == column:
# we have some editable schema metadata for this field
field_match = True
if fieldInfo.glossaryTerms:
if term_to_add not in [x.urn for x in fieldInfo.glossaryTerms.terms]:
                    # this term is not present
fieldInfo.glossaryTerms.terms.append(term_association_to_add)
need_write = True
else:
fieldInfo.glossaryTerms = term_aspect_to_set
need_write = True
if not field_match:
# this field isn't present in the editable schema metadata aspect, add it
field_info = field_info_to_set
current_editable_schema_metadata.editableSchemaFieldInfo.append(field_info)
need_write = True
else:
# create a brand new editable schema metadata aspect
current_editable_schema_metadata = EditableSchemaMetadataClass(
editableSchemaFieldInfo=[field_info_to_set],
created=current_timestamp,
)
need_write = True
if need_write:
event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper(
entityType="dataset",
changeType=ChangeTypeClass.UPSERT,
entityUrn=dataset_urn,
aspectName="editableSchemaMetadata",
aspect=current_editable_schema_metadata,
)
graph.emit(event)
log.info(f"Tag {term_to_add} added to column {column} of dataset {dataset_urn}")
else:
log.info(f"Tag {term_to_add} already attached to column {column}, omitting write")
| 35.026549
| 87
| 0.728903
|
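To make the v2-to-simple field-path conversion above concrete, a small check that can be run in the same module; the "[version=2.0]..." input string is a made-up example of the notation, not taken from a real dataset:

# Assumes get_simple_field_path_from_v2_field_path() above is in scope.
v2_path = "[version=2.0].[type=struct].address.[type=string].zipcode"
assert get_simple_field_path_from_v2_field_path(v2_path) == "address.zipcode"
assert get_simple_field_path_from_v2_field_path("address.zipcode") == "address.zipcode"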
b0328813f0f4edbb24cc46514fe3046bff24c13f
| 624
|
py
|
Python
|
ool/oppositions/migrations/0036_alter_opposition_commune.py
|
HeLsEroC/bbr
|
0dd40bffd05faa777bec3a89dd1712f0f546d60e
|
[
"MIT"
] | null | null | null |
ool/oppositions/migrations/0036_alter_opposition_commune.py
|
HeLsEroC/bbr
|
0dd40bffd05faa777bec3a89dd1712f0f546d60e
|
[
"MIT"
] | null | null | null |
ool/oppositions/migrations/0036_alter_opposition_commune.py
|
HeLsEroC/bbr
|
0dd40bffd05faa777bec3a89dd1712f0f546d60e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-10-20 14:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0020_alter_user_type'),
('oppositions', '0035_rename_debuty_arret_to_procureur_opposition_deputy_arret_to_procureur'),
]
operations = [
migrations.AlterField(
model_name='opposition',
name='commune',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='oppositions', to='users.commune'),
),
]
| 29.714286
| 152
| 0.68109
|
9b0b1ac6ccc826ddc8d9c7077b532a27b505f70e
| 3,973
|
py
|
Python
|
src/helpers/generic.py
|
chocoogirl/bimbangal_backend
|
4fb01a435bb024f51eec722946d3dfde438dcb13
|
[
"Apache-2.0"
] | null | null | null |
src/helpers/generic.py
|
chocoogirl/bimbangal_backend
|
4fb01a435bb024f51eec722946d3dfde438dcb13
|
[
"Apache-2.0"
] | 3
|
2019-12-26T16:44:42.000Z
|
2021-06-01T22:28:13.000Z
|
src/helpers/generic.py
|
chocoogirl/bimbangal_backend
|
4fb01a435bb024f51eec722946d3dfde438dcb13
|
[
"Apache-2.0"
] | null | null | null |
from random import shuffle
import json
import requests
from src.model.models import SearchTermAutoComplete, ImageSearchResponse
class GenericHelper(object):
def __init__(self, unsplash_dict, pexels_dict):
self.unsplash_dict = unsplash_dict
self.pexels_dict = pexels_dict
self.api_list = []
self.api_list.append(self.unsplash_dict)
self.api_list.append(self.pexels_dict)
def searchSources(self, search_term, search_ac):
response_list = self.get_data(self.api_list, search_term, search_ac)
shuffle(response_list)
response_dict = {'searchterm': search_term.capitalize(), 'photos': response_list}
return response_dict
def get_data(self, api_list, search_term, search_ac):
response_list = []
for api_content in api_list:
photo_search_results = self.get_photos(api_content['baseurl'], api_content['params'],
api_content['headers'],
search_term)
if photo_search_results is not None and photo_search_results[api_content['responsekey']] is not None:
photo_search_results = self.int2str(photo_search_results, search_term)
# Got results. Store in DB and autocomplete
ImageSearchResponse(search_term, api_content['source'].lower(), photo_search_results).save()
search_ac.store_autocomplete(search_term)
framed_response_body = self.frame_data_response(photo_search_results[api_content['responsekey']],
api_content['source'])
response_list = response_list + framed_response_body
return response_list
def get_photos(self, base_url, params, headers, search_term):
if 'tags' in params:
params['tags'] = search_term
elif 'query' in params:
params['query'] = search_term
if headers is not None:
response = requests.get(base_url, params=params, headers=headers)
else:
response = requests.get(base_url, params=params)
if response.status_code == 200:
response_content = json.loads(response.content)
if 'results' in response_content:
response_content['photos'] = response_content.pop('results')
return response_content
else:
return None
def frame_data_response(self, photo_list, source):
url_dict = {}
framed_list = []
source = source.lower()
for photo_item in photo_list:
id = photo_item['id']
if source == 'unsplash':
url = photo_item['urls']['small']
owner = photo_item['user']['name']
descr = photo_item['description']
raw_url = photo_item['urls']['regular']
elif source == 'pexels':
url = photo_item['src']['medium']
owner = photo_item['photographer']
raw_url = photo_item['src']['large']
temp_descr = photo_item['url']
temp_descr = temp_descr[:-1]
temp_descr_modified = temp_descr[temp_descr.rfind('/') + 1:]
descr = (temp_descr_modified[:temp_descr_modified.rfind('-')]).replace('-', ' ')
url_dict = {"source": source.capitalize(), "id": id, 'owner': owner.capitalize(), "url": url,
"rawurl": raw_url, "description": descr.capitalize()}
framed_list.append(url_dict)
return framed_list
def int2str(self, photo_search_results, search_term):
for index in photo_search_results['photos']:
index['id'] = str(index['id'])
if 'description' in index:
if index['description'] is None:
index['description'] = search_term
return photo_search_results
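# Hypothetical wiring sketch (endpoints and keys are placeholders; the dict keys
# below are the ones this class actually reads: baseurl, params, headers,
# responsekey and source):
# unsplash_dict = {'baseurl': '<unsplash-search-endpoint>', 'params': {'query': ''},
#                  'headers': {'Authorization': 'Client-ID <access-key>'},
#                  'responsekey': 'photos', 'source': 'Unsplash'}
# pexels_dict = {'baseurl': '<pexels-search-endpoint>', 'params': {'query': ''},
#                'headers': {'Authorization': '<api-key>'},
#                'responsekey': 'photos', 'source': 'Pexels'}
# helper = GenericHelper(unsplash_dict, pexels_dict)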
| 43.184783
| 113
| 0.595016
|
6e50cd2cd6fbf34a296e2ca1eb80002d745c5dab
| 19,566
|
py
|
Python
|
sdks/python/apache_beam/ml/gcp/recommendations_ai.py
|
psobot/beam
|
d9da8a4dc818b01a86d2dce2e78c0d78b47038bb
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5
|
2019-07-27T11:54:33.000Z
|
2021-06-06T11:53:36.000Z
|
sdks/python/apache_beam/ml/gcp/recommendations_ai.py
|
psobot/beam
|
d9da8a4dc818b01a86d2dce2e78c0d78b47038bb
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 12
|
2019-04-15T15:27:23.000Z
|
2019-07-01T18:13:10.000Z
|
sdks/python/apache_beam/ml/gcp/recommendations_ai.py
|
psobot/beam
|
d9da8a4dc818b01a86d2dce2e78c0d78b47038bb
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-06-03T19:54:48.000Z
|
2021-06-03T19:54:48.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A connector for sending API requests to the GCP Recommendations AI
API (https://cloud.google.com/recommendations).
"""
from __future__ import absolute_import
from typing import Sequence
from typing import Tuple
from google.api_core.retry import Retry
from apache_beam import pvalue
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms.util import GroupIntoBatches
from cachetools.func import ttl_cache
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud import recommendationengine
except ImportError:
raise ImportError(
'Google Cloud Recommendation AI not supported for this execution '
'environment (could not import google.cloud.recommendationengine).')
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
__all__ = [
'CreateCatalogItem',
'WriteUserEvent',
'ImportCatalogItems',
'ImportUserEvents',
'PredictUserEvent'
]
FAILED_CATALOG_ITEMS = "failed_catalog_items"
@ttl_cache(maxsize=128, ttl=3600)
def get_recommendation_prediction_client():
"""Returns a Recommendation AI - Prediction Service client."""
_client = recommendationengine.PredictionServiceClient()
return _client
@ttl_cache(maxsize=128, ttl=3600)
def get_recommendation_catalog_client():
"""Returns a Recommendation AI - Catalog Service client."""
_client = recommendationengine.CatalogServiceClient()
return _client
@ttl_cache(maxsize=128, ttl=3600)
def get_recommendation_user_event_client():
"""Returns a Recommendation AI - UserEvent Service client."""
_client = recommendationengine.UserEventServiceClient()
return _client
class CreateCatalogItem(PTransform):
"""Creates catalogitem information.
The ``PTranform`` returns a PCollectionTuple with a PCollections of
successfully and failed created CatalogItems.
Example usage::
pipeline | CreateCatalogItem(
project='example-gcp-project',
catalog_name='my-catalog')
"""
def __init__(
self,
project: str = None,
retry: Retry = None,
timeout: float = 120,
metadata: Sequence[Tuple[str, str]] = None,
catalog_name: str = "default_catalog"):
"""Initializes a :class:`CreateCatalogItem` transform.
Args:
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
"""
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.catalog_name = catalog_name
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
"""GCP project name needs to be specified in "project" pipeline
option""")
return pcoll | ParDo(
_CreateCatalogItemFn(
self.project,
self.retry,
self.timeout,
self.metadata,
self.catalog_name))
class _CreateCatalogItemFn(DoFn):
def __init__(
self,
project: str = None,
retry: Retry = None,
timeout: float = 120,
metadata: Sequence[Tuple[str, str]] = None,
catalog_name: str = None):
self._client = None
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.parent = f"projects/{project}/locations/global/catalogs/{catalog_name}"
self.counter = Metrics.counter(self.__class__, "api_calls")
def setup(self):
if self._client is None:
self._client = get_recommendation_catalog_client()
def process(self, element):
catalog_item = recommendationengine.CatalogItem(element)
request = recommendationengine.CreateCatalogItemRequest(
parent=self.parent, catalog_item=catalog_item)
try:
created_catalog_item = self._client.create_catalog_item(
request=request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata)
self.counter.inc()
yield recommendationengine.CatalogItem.to_dict(created_catalog_item)
except Exception:
yield pvalue.TaggedOutput(
FAILED_CATALOG_ITEMS,
recommendationengine.CatalogItem.to_dict(catalog_item))
class ImportCatalogItems(PTransform):
"""Imports catalogitems in bulk.
The `PTransform` returns a PCollectionTuple with PCollections of
successfully and failed imported CatalogItems.
Example usage::
pipeline
| ImportCatalogItems(
project='example-gcp-project',
catalog_name='my-catalog')
"""
def __init__(
self,
max_batch_size: int = 5000,
project: str = None,
retry: Retry = None,
timeout: float = 120,
metadata: Sequence[Tuple[str, str]] = None,
catalog_name: str = "default_catalog"):
"""Initializes a :class:`ImportCatalogItems` transform
Args:
max_batch_size (int): Optional. Maximum number of catalog items per
request. Default: 5000.
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
"""
self.max_batch_size = max_batch_size
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.catalog_name = catalog_name
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified in "project" pipeline option')
return (
pcoll | GroupIntoBatches.WithShardedKey(self.max_batch_size) | ParDo(
_ImportCatalogItemsFn(
self.project,
self.retry,
self.timeout,
self.metadata,
self.catalog_name)))
class _ImportCatalogItemsFn(DoFn):
def __init__(
self,
project=None,
retry=None,
timeout=120,
metadata=None,
catalog_name=None):
self._client = None
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.parent = f"projects/{project}/locations/global/catalogs/{catalog_name}"
self.counter = Metrics.counter(self.__class__, "api_calls")
def setup(self):
if self._client is None:
self._client = get_recommendation_catalog_client()
def process(self, element):
catalog_items = [recommendationengine.CatalogItem(e) for e in element[1]]
catalog_inline_source = recommendationengine.CatalogInlineSource(
{"catalog_items": catalog_items})
input_config = recommendationengine.InputConfig(
catalog_inline_source=catalog_inline_source)
request = recommendationengine.ImportCatalogItemsRequest(
parent=self.parent, input_config=input_config)
try:
operation = self._client.import_catalog_items(
request=request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata)
self.counter.inc(len(catalog_items))
yield operation.result()
except Exception:
yield pvalue.TaggedOutput(FAILED_CATALOG_ITEMS, catalog_items)
class WriteUserEvent(PTransform):
"""Write user event information.
The `PTransform` returns a PCollectionTuple with PCollections of
successfully and failed written UserEvents.
Example usage::
pipeline
| WriteUserEvent(
project='example-gcp-project',
catalog_name='my-catalog',
event_store='my_event_store')
"""
def __init__(
self,
project: str = None,
retry: Retry = None,
timeout: float = 120,
metadata: Sequence[Tuple[str, str]] = None,
catalog_name: str = "default_catalog",
event_store: str = "default_event_store"):
"""Initializes a :class:`WriteUserEvent` transform.
Args:
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
event_store (str): Optional. Name of the event store.
Default: 'default_event_store'
"""
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.catalog_name = catalog_name
self.event_store = event_store
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified in "project" pipeline option')
return pcoll | ParDo(
_WriteUserEventFn(
self.project,
self.retry,
self.timeout,
self.metadata,
self.catalog_name,
self.event_store))
class _WriteUserEventFn(DoFn):
FAILED_USER_EVENTS = "failed_user_events"
def __init__(
self,
project=None,
retry=None,
timeout=120,
metadata=None,
catalog_name=None,
event_store=None):
self._client = None
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.parent = f"projects/{project}/locations/global/catalogs/"\
f"{catalog_name}/eventStores/{event_store}"
self.counter = Metrics.counter(self.__class__, "api_calls")
def setup(self):
if self._client is None:
self._client = get_recommendation_user_event_client()
def process(self, element):
user_event = recommendationengine.UserEvent(element)
request = recommendationengine.WriteUserEventRequest(
parent=self.parent, user_event=user_event)
try:
created_user_event = self._client.write_user_event(request)
self.counter.inc()
yield recommendationengine.UserEvent.to_dict(created_user_event)
except Exception:
yield pvalue.TaggedOutput(
self.FAILED_USER_EVENTS,
recommendationengine.UserEvent.to_dict(user_event))
class ImportUserEvents(PTransform):
"""Imports userevents in bulk.
The `PTransform` returns a PCollectionTuple with PCollections of
successfully and failed imported UserEvents.
Example usage::
pipeline
| ImportUserEvents(
project='example-gcp-project',
catalog_name='my-catalog',
event_store='my_event_store')
"""
def __init__(
self,
max_batch_size: int = 5000,
project: str = None,
retry: Retry = None,
timeout: float = 120,
metadata: Sequence[Tuple[str, str]] = None,
catalog_name: str = "default_catalog",
event_store: str = "default_event_store"):
"""Initializes a :class:`WriteUserEvent` transform.
Args:
max_batch_size (int): Optional. Maximum number of user events
per request. Default: 5000.
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
event_store (str): Optional. Name of the event store.
Default: 'default_event_store'
"""
self.max_batch_size = max_batch_size
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.catalog_name = catalog_name
self.event_store = event_store
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified in "project" pipeline option')
return (
pcoll | GroupIntoBatches.WithShardedKey(self.max_batch_size) | ParDo(
_ImportUserEventsFn(
self.project,
self.retry,
self.timeout,
self.metadata,
self.catalog_name,
self.event_store)))
class _ImportUserEventsFn(DoFn):
FAILED_USER_EVENTS = "failed_user_events"
def __init__(
self,
project=None,
retry=None,
timeout=120,
metadata=None,
catalog_name=None,
event_store=None):
self._client = None
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.parent = f"projects/{project}/locations/global/catalogs/"\
f"{catalog_name}/eventStores/{event_store}"
self.counter = Metrics.counter(self.__class__, "api_calls")
def setup(self):
if self._client is None:
self._client = get_recommendation_user_event_client()
def process(self, element):
user_events = [recommendationengine.UserEvent(e) for e in element[1]]
user_event_inline_source = recommendationengine.UserEventInlineSource(
{"user_events": user_events})
input_config = recommendationengine.InputConfig(
user_event_inline_source=user_event_inline_source)
request = recommendationengine.ImportUserEventsRequest(
parent=self.parent, input_config=input_config)
try:
# An ImportUserEventsRequest is handled by import_user_events, which returns a
# long-running operation whose result() is an ImportUserEventsResponse.
operation = self._client.import_user_events(request)
self.counter.inc(len(user_events))
yield recommendationengine.ImportUserEventsResponse.to_dict(operation.result())
except Exception:
yield pvalue.TaggedOutput(self.FAILED_USER_EVENTS, user_events)
class PredictUserEvent(PTransform):
"""Make a recommendation prediction.
The `PTransform` returns a PCollection with the prediction results.
Example usage::
pipeline
| PredictUserEvent(
project='example-gcp-project',
catalog_name='my-catalog',
event_store='my_event_store',
placement_id='recently_viewed_default')
"""
def __init__(
self,
project: str = None,
retry: Retry = None,
timeout: float = 120,
metadata: Sequence[Tuple[str, str]] = None,
catalog_name: str = "default_catalog",
event_store: str = "default_event_store",
placement_id: str = None):
"""Initializes a :class:`PredictUserEvent` transform.
Args:
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
event_store (str): Optional. Name of the event store.
Default: 'default_event_store'
placement_id (str): Required. ID of the recommendation engine
placement. This id is used to identify the set of models that
will be used to make the prediction.
"""
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.placement_id = placement_id
self.catalog_name = catalog_name
self.event_store = event_store
if placement_id is None:
raise ValueError('placement_id must be specified')
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified in "project" pipeline option')
return pcoll | ParDo(
_PredictUserEventFn(
self.project,
self.retry,
self.timeout,
self.metadata,
self.catalog_name,
self.event_store,
self.placement_id))
class _PredictUserEventFn(DoFn):
FAILED_PREDICTIONS = "failed_predictions"
def __init__(
self,
project=None,
retry=None,
timeout=120,
metadata=None,
catalog_name=None,
event_store=None,
placement_id=None):
self._client = None
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.name = f"projects/{project}/locations/global/catalogs/"\
f"{catalog_name}/eventStores/{event_store}/placements/"\
f"{placement_id}"
self.counter = Metrics.counter(self.__class__, "api_calls")
def setup(self):
if self._client is None:
self._client = get_recommendation_prediction_client()
def process(self, element):
user_event = recommendationengine.UserEvent(element)
request = recommendationengine.PredictRequest(
name=self.name, user_event=user_event)
try:
prediction = self._client.predict(request)
self.counter.inc()
yield [
recommendationengine.PredictResponse.to_dict(p)
for p in prediction.pages
]
except Exception:
yield pvalue.TaggedOutput(self.FAILED_PREDICTIONS, user_event)
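# A minimal, hypothetical pipeline sketch (project, placement and event values
# are placeholders, not taken from this module or its tests):
# import apache_beam as beam
# with beam.Pipeline() as p:
#     _ = (p
#          | beam.Create([{"event_type": "detail-page-view",
#                          "user_info": {"visitor_id": "visitor-1"}}])
#          | PredictUserEvent(project="example-gcp-project",
#                             placement_id="recently_viewed_default")
#          | beam.Map(print))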
| 33.389078
| 80
| 0.667127
|
f120610c38aecbaa914e96b6b708151cfaf30194
| 975
|
py
|
Python
|
setup.py
|
bilelmoussaoui/pyfavicon
|
ebc74bd3faced9bd84cba25c12da288e7f7753f0
|
[
"MIT"
] | 5
|
2019-06-06T02:49:23.000Z
|
2020-12-07T23:25:56.000Z
|
setup.py
|
bilelmoussaoui/pyfavicon
|
ebc74bd3faced9bd84cba25c12da288e7f7753f0
|
[
"MIT"
] | 1
|
2020-04-13T20:46:13.000Z
|
2020-04-15T09:46:38.000Z
|
setup.py
|
bilelmoussaoui/pyfavicon
|
ebc74bd3faced9bd84cba25c12da288e7f7753f0
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyfavicon",
version="0.1.1",
author="Bilal Elmoussaoui",
author_email="bil.elmoussaoui@gmail.com",
description="Async favicon fetcher",
long_description_content_type="text/markdown",
long_description=long_description,
license='MIT',
url="https://github.com/bilelmoussaoui/pyfavicon",
packages=['pyfavicon'],
install_requires=[
'aiohttp',
'beautifulsoup4',
'Pillow'
],
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.7',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Topic :: Utilities',
'Topic :: Internet :: WWW/HTTP',
],
tests_require=[
'pytest',
'coveralls',
'pytest-cov',
'pytest-asyncio'
],
test_suite='tests',
)
| 25.657895
| 54
| 0.6
|
dc0374737ed48fda320a26b59fc4277e53642770
| 1,134
|
py
|
Python
|
pydecoder/json.py
|
iodevs/pydecoder
|
7b264ea7b17f5af4a06c190ef0973fb535d28fc3
|
[
"BSD-3-Clause"
] | 2
|
2017-05-22T10:08:59.000Z
|
2018-03-02T00:26:05.000Z
|
pydecoder/json.py
|
iodevs/pydecoder
|
7b264ea7b17f5af4a06c190ef0973fb535d28fc3
|
[
"BSD-3-Clause"
] | 1,081
|
2017-05-22T10:02:34.000Z
|
2022-03-31T10:20:30.000Z
|
pydecoder/json.py
|
iodevs/pydecoder
|
7b264ea7b17f5af4a06c190ef0973fb535d28fc3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
'''Library for JSON decoding'''
from pyresult import (
ok,
error,
rmap,
fold
)
from toolz import curry, get_in
from pydecoder.primitives import ( # noqa pylint: disable=unused-import
to_int,
to_float,
to_string,
to_bool,
null,
)
def _to_key_list(keys):
if not isinstance(keys, tuple) and not isinstance(keys, list):
return (keys, )
return keys
@curry
def getter(json, keys): # pylint: disable=redefined-outer-name
'''Get data from json'''
try:
return ok(
get_in(
_to_key_list(keys),
json,
no_default=True
)
)
except KeyError:
return error(u'Value is empty or path/key {0!r} not found...'.format(keys))
@curry
def decode(creator, decoders, json_data):
'''Run decoders on json and pass the folded result to the creator function
json: (args -> value) -> List Decoder -> Json -> Result err value
'''
values = [decoder(getter(json_data)) for decoder in decoders] # pylint: disable=no-value-for-parameter
return rmap(creator, fold(values))
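# Hypothetical usage sketch (keys and the creator are invented for illustration,
# assuming `fold` collects the decoded values into a single Result):
# point_decoders = [
#     lambda get: get('x'),            # Result for json['x']
#     lambda get: get(('pos', 'y')),   # Result for json['pos']['y']
# ]
# decoded = decode(lambda values: tuple(values), point_decoders,
#                  {'x': 1, 'pos': {'y': 2}})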
| 21.807692
| 107
| 0.60582
|
849514405445888b37e71c5accb2a10b52ea5c79
| 12,503
|
py
|
Python
|
tests/integration/s3/test_bucket.py
|
pfhayes/boto
|
a9746be5f5cdca073c0013ad61e0c423c197502a
|
[
"MIT"
] | 8
|
2016-02-08T11:59:31.000Z
|
2020-05-31T15:19:54.000Z
|
tests/integration/s3/test_bucket.py
|
haandol/boto
|
a82244fc9024d122c2e16e57f35faaea51b59405
|
[
"MIT"
] | 3
|
2020-05-25T02:39:14.000Z
|
2021-06-07T05:26:12.000Z
|
tests/integration/s3/test_bucket.py
|
haandol/boto
|
a82244fc9024d122c2e16e57f35faaea51b59405
|
[
"MIT"
] | 9
|
2016-02-26T16:47:19.000Z
|
2022-01-19T07:27:06.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 Bucket
"""
from mock import patch, Mock
import unittest
import time
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.s3.bucketlogging import BucketLogging
from boto.s3.lifecycle import Lifecycle
from boto.s3.lifecycle import Transition
from boto.s3.lifecycle import Expiration
from boto.s3.lifecycle import Rule
from boto.s3.acl import Grant
from boto.s3.tagging import Tags, TagSet
from boto.s3.website import RedirectLocation
from boto.compat import urllib
class S3BucketTest (unittest.TestCase):
s3 = True
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'bucket-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_next_marker(self):
expected = ["a/", "b", "c"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# Normal list of first 2 keys will have
# no NextMarker set, so we use last key to iterate
# last element will be "b" so no issue.
rs = self.bucket.get_all_keys(max_keys=2)
for element in rs:
pass
self.assertEqual(element.name, "b")
self.assertEqual(rs.next_marker, None)
# list using delimiter of first 2 keys will have
# a NextMarker set (when truncated). As prefixes
# are grouped together at the end, we get "a/" as
# last element, but luckily we have next_marker.
rs = self.bucket.get_all_keys(max_keys=2, delimiter="/")
for element in rs:
pass
self.assertEqual(element.name, "a/")
self.assertEqual(rs.next_marker, "b")
# ensure bucket.list() still works by just
# popping elements off the front of expected.
rs = self.bucket.list()
for element in rs:
self.assertEqual(element.name, expected.pop(0))
self.assertEqual(expected, [])
def test_list_with_url_encoding(self):
expected = ["α", "β", "γ"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# ensure bucket.list() still works by just
# popping elements off the front of expected.
orig_getall = self.bucket._get_all
getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k)
with patch.object(self.bucket, '_get_all', getall):
rs = self.bucket.list(encoding_type="url")
for element in rs:
name = urllib.parse.unquote(element.name.encode('utf-8'))
self.assertEqual(name, expected.pop(0))
self.assertEqual(expected, [])
def test_logging(self):
# use self.bucket as the target bucket so that teardown
# will delete any log files that make it into the bucket
# automatically and all we have to do is delete the
# source bucket.
sb_name = "src-" + self.bucket_name
sb = self.conn.create_bucket(sb_name)
# grant log write perms to target bucket using canned-acl
self.bucket.set_acl("log-delivery-write")
target_bucket = self.bucket_name
target_prefix = u"jp/ログ/"
# Check existing status is disabled
bls = sb.get_logging_status()
self.assertEqual(bls.target, None)
# Create a logging status and grant auth users READ PERM
authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
authr = Grant(permission="READ", type="Group", uri=authuri)
sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr])
# Check the status and confirm its set.
bls = sb.get_logging_status()
self.assertEqual(bls.target, target_bucket)
self.assertEqual(bls.prefix, target_prefix)
self.assertEqual(len(bls.grants), 1)
self.assertEqual(bls.grants[0].type, "Group")
self.assertEqual(bls.grants[0].uri, authuri)
# finally delete the src bucket
sb.delete()
def test_tagging(self):
tagging = """
<Tagging>
<TagSet>
<Tag>
<Key>tagkey</Key>
<Value>tagvalue</Value>
</Tag>
</TagSet>
</Tagging>
"""
self.bucket.set_xml_tags(tagging)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'tagkey')
self.assertEqual(response[0][0].value, 'tagvalue')
self.bucket.delete_tags()
try:
self.bucket.get_tags()
except S3ResponseError as e:
self.assertEqual(e.code, 'NoSuchTagSet')
except Exception as e:
self.fail("Wrong exception raised (expected S3ResponseError): %s"
% e)
else:
self.fail("Expected S3ResponseError, but no exception raised.")
def test_tagging_from_objects(self):
"""Create tags from python objects rather than raw xml."""
t = Tags()
tag_set = TagSet()
tag_set.add_tag('akey', 'avalue')
tag_set.add_tag('anotherkey', 'anothervalue')
t.add_tag_set(tag_set)
self.bucket.set_tags(t)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'akey')
self.assertEqual(response[0][0].value, 'avalue')
self.assertEqual(response[0][1].key, 'anotherkey')
self.assertEqual(response[0][1].value, 'anothervalue')
def test_website_configuration(self):
response = self.bucket.configure_website('index.html')
self.assertTrue(response)
config = self.bucket.get_website_configuration()
self.assertEqual(config, {'WebsiteConfiguration':
{'IndexDocument': {'Suffix': 'index.html'}}})
config2, xml = self.bucket.get_website_configuration_with_xml()
self.assertEqual(config, config2)
self.assertTrue('<Suffix>index.html</Suffix>' in xml, xml)
def test_website_redirect_all_requests(self):
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {
'RedirectAllRequestsTo': {
'HostName': 'example.com'}}})
# Can configure the protocol as well.
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com', 'https'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {'RedirectAllRequestsTo': {
'HostName': 'example.com',
'Protocol': 'https',
}}}
)
def test_lifecycle(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', '', 'Enabled', 30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertEqual(actual_lifecycle.id, 'myid')
self.assertEqual(actual_lifecycle.prefix, '')
self.assertEqual(actual_lifecycle.status, 'Enabled')
self.assertEqual(actual_lifecycle.transition, None)
def test_lifecycle_with_glacier_transition(self):
lifecycle = Lifecycle()
transition = Transition(days=30, storage_class='GLACIER')
rule = Rule('myid', prefix='', status='Enabled', expiration=None,
transition=transition)
lifecycle.append(rule)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
transition = response[0].transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(transition.date, None)
def test_lifecycle_multi(self):
date = '2022-10-12T00:00:00.000Z'
sc = 'GLACIER'
lifecycle = Lifecycle()
lifecycle.add_rule("1", "1/", "Enabled", 1)
lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
lifecycle.add_rule("4", "4/", "Enabled", None,
Transition(days=4, storage_class=sc))
lifecycle.add_rule("5", "5/", "Enabled", None,
Transition(date=date, storage_class=sc))
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
readlifecycle = self.bucket.get_lifecycle_config()
for rule in readlifecycle:
if rule.id == "1":
self.assertEqual(rule.prefix, "1/")
self.assertEqual(rule.expiration.days, 1)
elif rule.id == "2":
self.assertEqual(rule.prefix, "2/")
self.assertEqual(rule.expiration.days, 2)
elif rule.id == "3":
self.assertEqual(rule.prefix, "3/")
self.assertEqual(rule.expiration.date, date)
elif rule.id == "4":
self.assertEqual(rule.prefix, "4/")
self.assertEqual(rule.transition.days, 4)
self.assertEqual(rule.transition.storage_class, sc)
elif rule.id == "5":
self.assertEqual(rule.prefix, "5/")
self.assertEqual(rule.transition.date, date)
self.assertEqual(rule.transition.storage_class, sc)
else:
self.fail("unexpected id %s" % rule.id)
def test_lifecycle_jp(self):
# test lifecycle with Japanese prefix
name = "Japanese files"
prefix = "日本語/"
days = 30
lifecycle = Lifecycle()
lifecycle.add_rule(name, prefix, "Enabled", days)
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
readlifecycle = self.bucket.get_lifecycle_config()
for rule in readlifecycle:
self.assertEqual(rule.id, name)
self.assertEqual(rule.expiration.days, days)
#Note: Boto seems correct? AWS seems broken?
#self.assertEqual(rule.prefix, prefix)
def test_lifecycle_with_defaults(self):
lifecycle = Lifecycle()
lifecycle.add_rule(expiration=30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertNotEqual(len(actual_lifecycle.id), 0)
self.assertEqual(actual_lifecycle.prefix, '')
def test_lifecycle_rule_xml(self):
# create a rule directly with id, prefix defaults
rule = Rule(status='Enabled', expiration=30)
s = rule.to_xml()
# Confirm no ID is set in the rule.
self.assertEqual(s.find("<ID>"), -1)
# Confirm Prefix is '' and not set to 'None'
self.assertNotEqual(s.find("<Prefix></Prefix>"), -1)
| 41.400662
| 85
| 0.631928
|
98d7482db60433a8772bd29058ddaa96f4d8d0af
| 3,225
|
py
|
Python
|
tempest/lib/services/network/__init__.py
|
sapcc/tempest
|
93a902072fd9986f2bb660166552f37d9eb5bdbb
|
[
"Apache-2.0"
] | 254
|
2015-01-05T19:22:52.000Z
|
2022-03-29T08:14:54.000Z
|
tempest/lib/services/network/__init__.py
|
openstack/tempest
|
c2f5a47cfba430d2086d1e67f4234ca0a9f855ff
|
[
"Apache-2.0"
] | 13
|
2015-03-02T15:53:04.000Z
|
2022-02-16T02:28:14.000Z
|
tempest/lib/services/network/__init__.py
|
openstack/tempest
|
c2f5a47cfba430d2086d1e67f4234ca0a9f855ff
|
[
"Apache-2.0"
] | 367
|
2015-01-07T15:05:39.000Z
|
2022-03-04T09:50:35.000Z
|
# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from tempest.lib.services.network.agents_client import AgentsClient
from tempest.lib.services.network.extensions_client import ExtensionsClient
from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
from tempest.lib.services.network.floating_ips_port_forwarding_client import \
FloatingIpsPortForwardingClient
from tempest.lib.services.network.log_resource_client import LogResourceClient
from tempest.lib.services.network.loggable_resource_client import \
LoggableResourceClient
from tempest.lib.services.network.metering_label_rules_client import \
MeteringLabelRulesClient
from tempest.lib.services.network.metering_labels_client import \
MeteringLabelsClient
from tempest.lib.services.network.networks_client import NetworksClient
from tempest.lib.services.network.ports_client import PortsClient
from tempest.lib.services.network.qos_client import QosClient
from tempest.lib.services.network.qos_limit_bandwidth_rules_client import \
QosLimitBandwidthRulesClient
from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
QosMinimumBandwidthRulesClient
from tempest.lib.services.network.quotas_client import QuotasClient
from tempest.lib.services.network.routers_client import RoutersClient
from tempest.lib.services.network.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.lib.services.network.security_groups_client import \
SecurityGroupsClient
from tempest.lib.services.network.segments_client import SegmentsClient
from tempest.lib.services.network.service_providers_client import \
ServiceProvidersClient
from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
from tempest.lib.services.network.subnets_client import SubnetsClient
from tempest.lib.services.network.tags_client import TagsClient
from tempest.lib.services.network.trunks_client import TrunksClient
from tempest.lib.services.network.versions_client import NetworkVersionsClient
__all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
'FloatingIpsPortForwardingClient', 'MeteringLabelRulesClient',
'MeteringLabelsClient', 'NetworksClient', 'NetworkVersionsClient',
'PortsClient', 'QosClient', 'QosMinimumBandwidthRulesClient',
'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
'SecurityGroupRulesClient', 'SecurityGroupsClient',
'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
'LoggableResourceClient']
| 55.603448
| 79
| 0.819845
|
8e4c2905f59fe60291646f311d5e167cb11e65ba
| 15,548
|
py
|
Python
|
src/data/genome_context.py
|
cdiaza/dimpl
|
2e758595b916899b22e72f55bc393653743fd944
|
[
"MIT"
] | 1
|
2021-01-16T20:39:45.000Z
|
2021-01-16T20:39:45.000Z
|
src/data/genome_context.py
|
cdiaza/dimpl
|
2e758595b916899b22e72f55bc393653743fd944
|
[
"MIT"
] | null | null | null |
src/data/genome_context.py
|
cdiaza/dimpl
|
2e758595b916899b22e72f55bc393653743fd944
|
[
"MIT"
] | null | null | null |
from Bio import Entrez
import pandas as pd
import numpy as np
import os
import urllib.request
import shutil
import gzip
import sys
from Bio import SeqIO
import configparser
import subprocess
from IPython.core.display import display, HTML, SVG
from urllib.error import HTTPError
import time
from Bio import AlignIO
import string
from itertools import compress
import genomeview
from src.shell.gff2bed import convert
Entrez.email = os.environ.get("ENTREZ_EMAIL")
Entrez.api_key = os.environ.get("ENTREZ_APIKEY")
def get_nuccore_id(hit_accession):
'''
get the nuccore id of the hit by searching the nuccore database with the hit accession
'''
try:
#search the nuccore database for info on the hit_accession
nuccore_search_handle = Entrez.esearch(term=hit_accession, field='ACCN', db='nuccore')
result = Entrez.read(nuccore_search_handle)
nuccore_search_handle.close()
nuccore_id = result['IdList'][0]
return nuccore_id
except HTTPError as e:
if(e.code == 429):
time.sleep(0.5)
return get_nuccore_id(hit_accession)
else:
raise
def fetch_deprecated_record_id(nuccore_id):
'''
returns record id for deprecated accessions
'''
try:
fetch_record_handle = Entrez.efetch(db="nucleotide", id=nuccore_id, rettype="gb", retmode="xml")
result = Entrez.read(fetch_record_handle)
fetch_record_handle.close()
summary = result[0]
acc = result[0]['GBSeq_xrefs'][-1]['GBXref_id']
search_term = "{} [Assembly Accession]".format(acc)
assembly_query_handle = Entrez.esearch(db="assembly", term=search_term, field='ASAC')
assembly_query_result = Entrez.read(assembly_query_handle)
assembly_query_handle.close()
assembly_record_ids = assembly_query_result["IdList"]
# Check to make sure a single record was returned, not multiple. Then save assembly record ID.
if len(assembly_record_ids) == 1:
record_id = assembly_record_ids[0]
else:
# Get summaries of the duplicate files
summary_handle= Entrez.esummary(id=','.join(assembly_query_result['IdList']), db='assembly')
result = Entrez.read(summary_handle)
summary_handle.close()
# Get a list of the assembly accessions
accession_list = [document_summary['AssemblyAccession'] for document_summary in result['DocumentSummarySet']['DocumentSummary']]
# Check if the assembly accession matches search term (disregard GCA vs GCF)
accession_match = [accession[3:] in search_term for accession in accession_list]
# Extract the index of the matching accession
index = list(compress(range(len(accession_list)), accession_match))
if len(index) != 1:
raise ValueError("{} records were returned by Entrez when searching for assembly {}".format(len(assembly_record_ids), genome.assembly_acc))
else:
record_id = result['DocumentSummarySet']['DocumentSummary'][index[0]].attributes['uid']
return record_id
except HTTPError as e:
if(e.code == 429):
time.sleep(0.5)
return fetch_deprecated_record_id(nuccore_id)
else:
raise
def get_assembly_link(nuccore_id):
'''
returns record ID from assembly link.
'''
try:
#get link to genome assembly information
assembly_link_handle =Entrez.elink(id = nuccore_id, dbfrom = 'nuccore', linkfrom = 'nuccore_assembly' , db = 'assembly')
assembly_query_result = Entrez.read(assembly_link_handle)
assembly_link_handle.close()
#print(assembly_query_result)
try:
#save assembly link ID for assembly information
assembly_record_ids = assembly_query_result[0]['LinkSetDb'][0]
# Check to make sure a single record was returned, not multiple. Then save assembly record ID.
if len(assembly_record_ids['Link']) == 1:
record_id = assembly_record_ids['Link'][0]['Id']
else:
record_id = fetch_deprecated_record_id(nuccore_id)
except IndexError as ierr:
record_id = fetch_deprecated_record_id(nuccore_id)
return record_id
except HTTPError as e:
if(e.code == 429):
time.sleep(0.5)
return get_assembly_link(nuccore_id)
else:
raise
def get_assembly_document(record_id):
'''
get the assembly accession of the hit from summary information of assembly record
if nuccoreid is deprecated - and record_id is 0 - then return 0
'''
try:
#get information on assembly for the specific assembly accession
assembly_record_summary_handle = Entrez.esummary(db="assembly", id=record_id)
result = Entrez.read(assembly_record_summary_handle, validate = False)
assembly_record_summary_handle.close()
#extract assembly record summary for ftp path later
assembly_record_summary = result['DocumentSummarySet']['DocumentSummary'][0]
return assembly_record_summary
except HTTPError as e:
if(e.code == 429):
time.sleep(0.5)
return get_assembly_document(record_id)
else:
raise
def find_seq_in_alignment(id, alignment):
'''returns alignment sequence'''
for seqrecord in alignment:
if seqrecord.id == id:
s = str(seqrecord.seq)
s = s.replace('.', '')
s = s.replace('-', '')
return s
def get_taxonomy(nuccore_id):
try:
record_id = get_assembly_link(nuccore_id)
tax_id = get_assembly_document(record_id)['Taxid']
taxonomy_info_handle= Entrez.efetch(db = 'taxonomy', id = tax_id, retmode = 'xml')
result = Entrez.read(taxonomy_info_handle)
taxonomy_info_handle.close()
return result[0]['Lineage']
except HTTPError as e:
if(e.code == 429):
time.sleep(0.5)
return get_taxonomy(nuccore_id)
else:
raise
def download_gff_fna(hit_accession):
'''
downloads the gff and fasta files for the hit accession
returns filename
'''
#get link to assembly record
nuccore_id = get_nuccore_id(hit_accession)
record_id = get_assembly_link(nuccore_id)
#get document information of assembly record
assembly_record_summary = get_assembly_document(record_id)
#get assembly information
assembly_accn = assembly_record_summary['AssemblyAccession']
assembly_name = assembly_record_summary['AssemblyName']
# Pull the FTP path from the assembly record summary.
ftp_path = assembly_record_summary['FtpPath_RefSeq'] + ''
base_filename = ftp_path[ftp_path.rfind("/") + 1:]
output_folder= "data/raw/download"
#if the output folder doesn't exist, create it, in the directory
if not os.path.exists(output_folder):
os.makedirs(output_folder)
#create filenames for the gff and fasta files
refseq_gff_zip_filename = "data/raw/download/{}_genomic.gff.gz".format(base_filename)
refseq_fasta_zip_filename = "data/raw/download/{}_genomic.fna.gz".format(base_filename)
if not os.path.isfile(refseq_gff_zip_filename):
# Build the full path to the genomic gff/fna files
refseq_gff_zip_ftp_path = "{}/{}_genomic.gff.gz".format(ftp_path, base_filename)
refseq_fasta_zip_ftp_path = "{}/{}_genomic.fna.gz".format(ftp_path, base_filename)
#create gff/fna files
with open(refseq_gff_zip_filename, 'wb') as refseq_gff_zip_file:
request_gff = urllib.request.urlopen(refseq_gff_zip_ftp_path)
#copy the content of source file to destination file.
shutil.copyfileobj(request_gff, refseq_gff_zip_file)
with open(refseq_fasta_zip_filename, 'wb') as refseq_fasta_zip_file:
request_fasta = urllib.request.urlopen(refseq_fasta_zip_ftp_path)
shutil.copyfileobj(request_fasta, refseq_fasta_zip_file)
# Build the full path to the UNZIPPED genomic gff/fna files
refseq_gff_ftp_path_unzip = "{}/{}_genomic.gff".format(ftp_path, base_filename)
refseq_fasta_ftp_path_unzip = "{}/{}_genomic.fasta".format(ftp_path, base_filename)
#unzip gff.gz file and save as .gff file
input_gff = gzip.GzipFile(refseq_gff_zip_filename, 'rb')
s_gff = input_gff.read()
input_gff.close()
output_gff = open("data/raw/download/{}_genomic.gff".format(base_filename), 'wb')
output_gff.write(s_gff)
output_gff.close()
#unzip fna.gz file and save as .fna file
input_fna = gzip.GzipFile(refseq_fasta_zip_filename, 'rb')
s_fna = input_fna.read()
input_fna.close()
output_fna = open("data/raw/download/{}_genomic.fna".format(base_filename), 'wb')
output_fna.write(s_fna)
output_fna.close()
return base_filename
def build_context_image(hit_row, alignment, upstream_range = 4000, downstream_range = 4000):
#extract hit accession of target from hit_row
hit_accession= hit_row.target_name
#extract start nt of target, stop nt of target and strand from hit_row
start= hit_row.seq_from
stop= hit_row.seq_to
seq_length = stop-start+1
strand = hit_row.strand
#if strand is + then flip is true, if strand is - then flip is false. This is needed in the genome browser url.
flip_val = 'false'
if(strand == "-"):
flip_val = 'true'
#get the assembly accession of the hit and download the fasta and gff files
if hit_row.assembly_accession=='nan':
base_filename = download_gff_fna(hit_accession)
else:
base_filename = hit_row.assembly_accession
fna_file = "data/raw/download/"+base_filename+"_genomic.fna"
gff_file = "data/raw/download/"+base_filename+"_genomic.gff"
#extract sequence of the part of the target that matches the query
target_sequence = find_seq_in_alignment(hit_row.target_coords, alignment)
target_sequence = target_sequence.replace('T','U')
#extract E-value
e_value = hit_row.e_value
#extract %gc
percent_gc = hit_row.gc
#extract score
score = hit_row.score
#extract target-name
target_name = hit_row.target_coords
#extract taxonomy
if hit_row.lineage=='nan':
lineage = get_taxonomy(hit_accession)
else:
lineage = hit_row.lineage
#set range
down_limit = start - downstream_range
up_limit = stop + upstream_range
#print statements for variables to be shown in image
print("Match #{}".format(int(hit_row.name)+1))
print("E-value: "+ str(e_value))
print("%GC: "+ str(percent_gc))
print("Score: "+ str(score))
print("Genome Assembly: " +str(base_filename))
print("Target: "+ target_name)
print("Lineage: " + lineage)
print("Matched Sequence: "+target_sequence)
#clickable link
genome_browser_url ='https://www.ncbi.nlm.nih.gov/projects/sviewer/?id={}&v={}:{}&c=FF6600&theme=Details&flip={}&select=null&content=3&color=0&label=1&geneModel=0&decor=0&layout=0&spacing=0&alncolor=on&m={},{}&mn=5,3'.format(hit_accession, down_limit, up_limit, flip_val, start, stop)
try:
display(HTML('<a href="{}")>genome browser</a>'.format(genome_browser_url)))
except HTTPError as e:
if(e.code == 429):
time.sleep(0.5)
return display(HTML('<a href="{}")>genome browser</a>'.format(genome_browser_url)))
else:
raise
gff_file_zip="/home/jovyan/work/data/raw/download/"+ base_filename+ "_genomic.gff.gz"
bed_file="/home/jovyan/work/data/raw/features/"+ base_filename+ "_genomic.bed"
convert(gff_file_zip,bed_file, desc_only=True)
def prerender(renderer, element):
# prerenderers get run before the track is rendered
if start < stop:
x1 = element.scale.topixels(start) # converting genomic coordinates to screen coordinates
x2 = element.scale.topixels(stop)
yield from renderer.rect(x1, 0, x2-x1, element.height, fill="lightblue", stroke="none")
if start > stop:
x1 = element.scale.topixels(start) # converting genomic coordinates to screen coordinates
x2 = element.scale.topixels(stop)
yield from renderer.rect(x1, 0, x1-x2, element.height, fill="lightblue", stroke="none")
doc=genomeview.visualize_data({"":bed_file},hit_accession,down_limit,up_limit)
cur_track = genomeview.get_one_track(doc, "")
cur_track.prerenderers = [prerender]
display(doc)
return base_filename, lineage
def get_all_images(results_csv_filename, alignment):
results_df = pd.read_csv(results_csv_filename, index_col=0)
results_df['lineage'] = results_df['lineage'].astype(str)
results_df['assembly_accession'] = results_df['assembly_accession'].astype(str)
updated_results_df = results_df.copy(deep=True)
for index, row in results_df.iterrows():
assembly_accession, lineage = build_context_image(row, alignment, upstream_range = 4000, downstream_range = 4000)
updated_results_df.loc[index, 'lineage'] = lineage
updated_results_df.loc[index, 'assembly_accession'] = assembly_accession
updated_results_df.to_csv(results_csv_filename)
def build_target_coords(target_name, seq_from, seq_to):
return "{}/{}-{}".format(target_name, seq_from, seq_to)
def run_rscape(outdir, sto_filename, fold=True, output=True):
truncated_filename = sto_filename[sto_filename.rfind('/')+1:sto_filename.rfind('.sto')]
if fold:
arguments = ['R-scape', '--fold', '--outdir', outdir, sto_filename]
else:
arguments = ['R-scape', '--r2rall', '--outdir', outdir, sto_filename]
result = subprocess.run(arguments, capture_output=True)
if output:
print(result.stdout.decode())
# List of the suffixes of excess files to delete
deleted_file_suffix = ['cov', 'dplot.ps','dplot.svg', 'power', 'sorted.cov', 'surv', 'surv.ps', 'surv.svg', 'R2R.sto', 'R2R.sto.pdf' ]
for suffix in deleted_file_suffix:
file_to_delete = "{}/{}_1.{}".format(outdir, truncated_filename, suffix)
if os.path.exists(file_to_delete):
os.remove(file_to_delete)
svg_filename = "{}/{}_1.R2R.sto.svg".format(outdir, truncated_filename)
if fold:
deleted_fold_suffix = ['dplot.ps', 'dplot.svg', 'power', 'R2R.sto', 'R2R.sto.pdf', 'cov']
for suffix in deleted_fold_suffix:
file_to_delete = "{}/{}_1.fold.{}".format(outdir, truncated_filename, suffix)
if os.path.exists(file_to_delete):
os.remove(file_to_delete)
os.remove("{}/{}_1.sorted.fold.cov".format(outdir, truncated_filename, suffix))
svg_filename = "{}/{}_1.fold.R2R.sto.svg".format(outdir, truncated_filename)
display(SVG(filename=svg_filename))
def tar_subdir_members(tar, import_tar_name):
tar_foldername = '.'.join(import_tar_name.split('/')[-1].split('.')[:-2]) + '/'
tar_foldername_length = len(tar_foldername)
for member in tar.getmembers():
if member.path.startswith(tar_foldername):
member.path = member.path[tar_foldername_length:]
yield member
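# Hypothetical driver sketch (file paths are placeholders): read the Stockholm
# alignment that the hit coordinates refer to, then render one context image
# per row of the results CSV.
# alignment = AlignIO.read("data/processed/hits.sto", "stockholm")
# get_all_images("data/processed/hits.csv", alignment)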
| 38.580645
| 291
| 0.664394
|
17297042557483bbdc16e305761a6db9f61316d6
| 4,029
|
py
|
Python
|
mr_scrapper/dao/google_drive.py
|
Mario8289/mr_scrapper
|
38c3d0a150a7cb3cd3b1806cd0de8e8c70ea83d5
|
[
"FTL"
] | null | null | null |
mr_scrapper/dao/google_drive.py
|
Mario8289/mr_scrapper
|
38c3d0a150a7cb3cd3b1806cd0de8e8c70ea83d5
|
[
"FTL"
] | null | null | null |
mr_scrapper/dao/google_drive.py
|
Mario8289/mr_scrapper
|
38c3d0a150a7cb3cd3b1806cd0de8e8c70ea83d5
|
[
"FTL"
] | null | null | null |
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from typing import List
from io import StringIO
import pandas as pd
class GoogleDriveDao:
def __init__(self, credentials="mycreds.txt"):
self.drive = self._connect(credentials)
@staticmethod
def _connect(credentials):
gauth = GoogleAuth()
gauth.LoadCredentialsFile(credentials)
if gauth.credentials is None:
# Authenticate if they're not there
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
# Refresh them if expired
gauth.Refresh()
else:
# Initialize the saved creds
gauth.Authorize()
drive = GoogleDrive(gauth)
return drive
def reconnect(self, credentials):
self.drive = self._connect(credentials)
def get_folder_id(self, folder: str, parents: str = None, create: bool = False) -> List:
base = "trashed=false and mimeType = 'application/vnd.google-apps.folder'"
if not parents:
q = f"'root' in parents and title = '{folder}' and {base}"
folder_id = self.get_id(q)
if not folder_id and create:
self.create_folder(folder, parent=None)
folder_id = self.get_id(q)
else:
folder_id = ['root']
for folder in parents.split('/') + [folder]:
q = f"'{folder_id[0]}' in parents and title = '{folder}' and {base}"
folder_id_iter = self.get_id(q)
if not folder_id_iter and create:
self.create_folder(folder, parent=folder_id)
folder_id_iter = self.get_id(q)
folder_id = folder_id_iter
return folder_id
def get_file_id(self, folder_id: str = None, title: str = None) -> List:
q = " trashed=false and mimeType != 'application/vnd.google-apps.folder'"
ids: List = []
if title:
q = f"title = '{title}' and {q}"
if folder_id:
for folder in folder_id:
q = f"'{folder}' in parents and {q}"
ids.extend(self.get_id(q))
return ids
def create_folder(self, folder, parent=None):
if parent:
folder = self.drive.CreateFile(
{'title': folder,
'parents': [{"kind": "drive#fileLink", "id": parent}],
"mimeType": "application/vnd.google-apps.folder"}
)
else:
folder = self.drive.CreateFile(
{'title': folder,
"mimeType": "application/vnd.google-apps.folder"}
)
folder.Upload()
def get_id(self, q: str) -> List:
files = self.drive.ListFile({'q': q}).GetList()
if len(files) != 0:
return [x['id'] for x in files]
else:
return []
def upload_string(self, string: str, file: str, folder_id: str = None):
if folder_id:
file = self.drive.CreateFile({'title': file, 'parents': [{'id': folder_id}]})
else:
file = self.drive.CreateFile({'title': file})
file.SetContentString(string)
file.Upload()
def upload_file(self, string: str, file: str, folder_id: str = None):
if folder_id:
file = self.drive.CreateFile({'title': file, 'parents': [{'id': folder_id}]})
else:
file = self.drive.CreateFile({'title': file})
file.SetContentFile(string)
file.Upload()
def delete_file(self, file_id: str, permanently=True):
file = self.drive.CreateFile({'id': file_id})
if permanently:
file.Delete()
else:
file.Trash()
def load_file(self, file_id: str, sep: str = ','):
file_obj = self.drive.CreateFile({'id': file_id})
data = StringIO(file_obj.GetContentString())
return pd.read_csv(data, sep=sep)
if __name__ == "__main__":
g = GoogleDriveDao()
print('test over')
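# Hypothetical follow-up sketch (folder names and contents are placeholders):
# folder_id = g.get_folder_id("exports", parents="mr_scrapper/data", create=True)
# g.upload_string("a,b\n1,2\n", "sample.csv", folder_id=folder_id[0])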
| 33.857143
| 92
| 0.562671
|
c8595b0aa6f057d8c00b1053e72bdff06bfea5b2
| 1,472
|
py
|
Python
|
mne/preprocessing/__init__.py
|
cverrier/mne-python
|
6763cac6b4b3d379a9e5319dd3575f1f124c0060
|
[
"BSD-3-Clause"
] | 1,953
|
2015-01-17T20:33:46.000Z
|
2022-03-30T04:36:34.000Z
|
mne/preprocessing/__init__.py
|
cverrier/mne-python
|
6763cac6b4b3d379a9e5319dd3575f1f124c0060
|
[
"BSD-3-Clause"
] | 8,490
|
2015-01-01T13:04:18.000Z
|
2022-03-31T23:02:08.000Z
|
mne/preprocessing/__init__.py
|
cverrier/mne-python
|
6763cac6b4b3d379a9e5319dd3575f1f124c0060
|
[
"BSD-3-Clause"
] | 1,130
|
2015-01-08T22:39:27.000Z
|
2022-03-30T21:44:26.000Z
|
"""Preprocessing with artifact detection, SSP, and ICA."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
from .flat import annotate_flat
from .maxfilter import apply_maxfilter
from .ssp import compute_proj_ecg, compute_proj_eog
from .eog import find_eog_events, create_eog_epochs
from .ecg import find_ecg_events, create_ecg_epochs
from .ica import (ICA, ica_find_eog_events, ica_find_ecg_events,
get_score_funcs, read_ica, corrmap, read_ica_eeglab)
from .otp import oversampled_temporal_projection
from ._peak_finder import peak_finder
from .infomax_ import infomax
from .stim import fix_stim_artifact
from .maxwell import (maxwell_filter, find_bad_channels_maxwell,
compute_maxwell_basis)
from .realign import realign_raw
from .xdawn import Xdawn
from ._csd import compute_current_source_density
from . import nirs
from .artifact_detection import (annotate_movement, compute_average_dev_head_t,
annotate_muscle_zscore, annotate_break)
from ._regress import regress_artifact
from ._fine_cal import (compute_fine_calibration, read_fine_calibration,
write_fine_calibration)
from .annotate_nan import annotate_nan
from .interpolate import equalize_bads
from . import ieeg
| 42.057143 | 79 | 0.768342 |
eeb3209f65917bc8d51bd2ef69a74d115e03fb4c | 1,354 | py | Python |
signals/models.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | ["MIT"] | 23 | 2016-03-24T00:31:47.000Z | 2022-02-10T21:27:53.000Z |
signals/models.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | ["MIT"] | 148 | 2016-04-03T00:22:55.000Z | 2020-08-01T20:08:03.000Z |
signals/models.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | ["MIT"] | 11 | 2016-04-24T03:31:31.000Z | 2019-09-03T16:51:08.000Z |
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Process(models.Model):
file_id = models.IntegerField(default=0)
file_name = models.CharField(max_length=400)
processed_time = models.DateTimeField()
user_name = models.CharField(max_length=100)
policy_name = models.CharField(max_length=100)
policy_id = models.IntegerField(default=0)
shared = models.BooleanField(default=True)
status = models.BooleanField(default=False)
frame_count = models.IntegerField(default=0)
class Output(models.Model):
process = models.ForeignKey(
Process, on_delete=models.CASCADE)
file_name = models.CharField(max_length=400)
op_id = models.IntegerField(default=0)
signal_name = models.CharField(max_length=400)
status = models.BooleanField(default=False)
task_id = models.CharField(max_length=200)
frame_count = models.IntegerField(default=0)
class Signal(models.Model):
output = models.ForeignKey(
Output, on_delete=models.CASCADE)
index = models.IntegerField(default=0)
signal_values = ArrayField(
ArrayField(
models.FloatField(blank=True),
),
)
frame_times = ArrayField(
ArrayField(
models.FloatField(blank=True),
),
)
frame_count = models.IntegerField(default=0)
| 31.488372 | 53 | 0.708272 |
f4237d5c54e03adcd1f07f4747e7ec63ddc27746 | 23,612 | py | Python |
sdks/python/apache_beam/coders/coders_test_common.py | rionmonster/beam | dcf3e789ae9e632b10e5a7c7ac2b8d4c07131e09 | ["Apache-2.0"] | null | null | null |
sdks/python/apache_beam/coders/coders_test_common.py | rionmonster/beam | dcf3e789ae9e632b10e5a7c7ac2b8d4c07131e09 | ["Apache-2.0"] | 1 | 2019-05-18T01:40:53.000Z | 2019-05-21T16:25:26.000Z |
sdks/python/apache_beam/coders/coders_test_common.py | rionmonster/beam | dcf3e789ae9e632b10e5a7c7ac2b8d4c07131e09 | ["Apache-2.0"] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests common to all coder implementations."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import math
import sys
import unittest
from builtins import range
from typing import Any
from typing import List
import pytest
from apache_beam.coders import proto2_coder_test_messages_pb2 as test_message
from apache_beam.coders import coders
from apache_beam.coders import typecoders
from apache_beam.internal import pickler
from apache_beam.runners import pipeline_context
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.transforms.window import GlobalWindow
from apache_beam.typehints import sharded_key_type
from apache_beam.typehints import typehints
from apache_beam.utils import timestamp
from apache_beam.utils import windowed_value
from apache_beam.utils.sharded_key import ShardedKey
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from . import observable
# Defined out of line for picklability.
class CustomCoder(coders.Coder):
def encode(self, x):
return str(x + 1).encode('utf-8')
def decode(self, encoded):
return int(encoded) - 1
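# Example round trip for the coder above (editor's note): values are shifted by
# one on encode and shifted back on decode, so CustomCoder().encode(5) == b'6'
# and CustomCoder().decode(b'6') == 5.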
# These tests need to all be run in the same process due to the asserts
# in tearDownClass.
@pytest.mark.no_xdist
class CodersTest(unittest.TestCase):
# These class methods ensure that we test each defined coder in both
# nested and unnested context.
# Common test values representing Python's built-in types.
test_values_deterministic: List[Any] = [
None,
1,
-1,
1.5,
b'str\0str',
u'unicode\0\u0101',
(),
(1, 2, 3),
[],
[1, 2, 3],
True,
False,
]
test_values = test_values_deterministic + [
dict(),
{
'a': 'b'
},
{
0: dict(), 1: len
},
set(),
{'a', 'b'},
len,
]
@classmethod
def setUpClass(cls):
cls.seen = set()
cls.seen_nested = set()
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
@classmethod
def tearDownClass(cls):
standard = set(
c for c in coders.__dict__.values() if isinstance(c, type) and
issubclass(c, coders.Coder) and 'Base' not in c.__name__)
standard -= set([
coders.Coder,
coders.AvroGenericCoder,
coders.DeterministicProtoCoder,
coders.FastCoder,
coders.ProtoCoder,
coders.ToBytesCoder
])
cls.seen_nested -= set([coders.ProtoCoder, CustomCoder])
assert not standard - cls.seen, str(standard - cls.seen)
assert not cls.seen_nested - standard, str(cls.seen_nested - standard)
@classmethod
def _observe(cls, coder):
cls.seen.add(type(coder))
cls._observe_nested(coder)
@classmethod
def _observe_nested(cls, coder):
if isinstance(coder, coders.TupleCoder):
for c in coder.coders():
cls.seen_nested.add(type(c))
cls._observe_nested(c)
def check_coder(self, coder, *values, **kwargs):
context = kwargs.pop('context', pipeline_context.PipelineContext())
test_size_estimation = kwargs.pop('test_size_estimation', True)
assert not kwargs
self._observe(coder)
for v in values:
self.assertEqual(v, coder.decode(coder.encode(v)))
if test_size_estimation:
self.assertEqual(coder.estimate_size(v), len(coder.encode(v)))
self.assertEqual(
coder.estimate_size(v), coder.get_impl().estimate_size(v))
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(v),
(coder.get_impl().estimate_size(v), []))
copy1 = pickler.loads(pickler.dumps(coder))
copy2 = coders.Coder.from_runner_api(coder.to_runner_api(context), context)
for v in values:
self.assertEqual(v, copy1.decode(copy2.encode(v)))
if coder.is_deterministic():
self.assertEqual(copy1.encode(v), copy2.encode(v))
def test_custom_coder(self):
self.check_coder(CustomCoder(), 1, -10, 5)
self.check_coder(
coders.TupleCoder((CustomCoder(), coders.BytesCoder())), (1, b'a'),
(-10, b'b'), (5, b'c'))
def test_pickle_coder(self):
coder = coders.PickleCoder()
self.check_coder(coder, *self.test_values)
def test_deterministic_coder(self):
coder = coders.FastPrimitivesCoder()
deterministic_coder = coders.DeterministicFastPrimitivesCoder(coder, 'step')
self.check_coder(deterministic_coder, *self.test_values_deterministic)
for v in self.test_values_deterministic:
self.check_coder(coders.TupleCoder((deterministic_coder, )), (v, ))
self.check_coder(
coders.TupleCoder(
(deterministic_coder, ) * len(self.test_values_deterministic)),
tuple(self.test_values_deterministic))
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, dict())
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, [1, dict()])
self.check_coder(
coders.TupleCoder((deterministic_coder, coder)), (1, dict()),
('a', [dict()]))
def test_dill_coder(self):
cell_value = (lambda x: lambda: x)(0).__closure__[0]
self.check_coder(coders.DillCoder(), 'a', 1, cell_value)
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), coders.DillCoder())),
(1, cell_value))
def test_fast_primitives_coder(self):
coder = coders.FastPrimitivesCoder(coders.SingletonCoder(len))
self.check_coder(coder, *self.test_values)
for v in self.test_values:
self.check_coder(coders.TupleCoder((coder, )), (v, ))
def test_fast_primitives_coder_large_int(self):
coder = coders.FastPrimitivesCoder()
self.check_coder(coder, 10**100)
def test_bytes_coder(self):
self.check_coder(coders.BytesCoder(), b'a', b'\0', b'z' * 1000)
def test_bool_coder(self):
self.check_coder(coders.BooleanCoder(), True, False)
def test_varint_coder(self):
# Small ints.
self.check_coder(coders.VarIntCoder(), *range(-10, 10))
# Multi-byte encoding starts at 128
self.check_coder(coders.VarIntCoder(), *range(120, 140))
# Large values
MAX_64_BIT_INT = 0x7fffffffffffffff
self.check_coder(
coders.VarIntCoder(),
*[
int(math.pow(-1, k) * math.exp(k))
for k in range(0, int(math.log(MAX_64_BIT_INT)))
])
def test_float_coder(self):
self.check_coder(
coders.FloatCoder(), *[float(0.1 * x) for x in range(-100, 100)])
self.check_coder(
coders.FloatCoder(), *[float(2**(0.1 * x)) for x in range(-100, 100)])
self.check_coder(coders.FloatCoder(), float('-Inf'), float('Inf'))
self.check_coder(
coders.TupleCoder((coders.FloatCoder(), coders.FloatCoder())), (0, 1),
(-100, 100), (0.5, 0.25))
def test_singleton_coder(self):
a = 'anything'
b = 'something else'
self.check_coder(coders.SingletonCoder(a), a)
self.check_coder(coders.SingletonCoder(b), b)
self.check_coder(
coders.TupleCoder((coders.SingletonCoder(a), coders.SingletonCoder(b))),
(a, b))
def test_interval_window_coder(self):
self.check_coder(
coders.IntervalWindowCoder(),
*[
window.IntervalWindow(x, y) for x in [-2**52, 0, 2**52]
for y in range(-100, 100)
])
self.check_coder(
coders.TupleCoder((coders.IntervalWindowCoder(), )),
(window.IntervalWindow(0, 10), ))
def test_timestamp_coder(self):
self.check_coder(
coders.TimestampCoder(),
*[timestamp.Timestamp(micros=x) for x in (-1000, 0, 1000)])
self.check_coder(
coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567000),
timestamp.Timestamp(micros=1234567000))
self.check_coder(
coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567890123456000),
timestamp.Timestamp(micros=1234567890123456000))
self.check_coder(
coders.TupleCoder((coders.TimestampCoder(), coders.BytesCoder())),
(timestamp.Timestamp.of(27), b'abc'))
def test_timer_coder(self):
self.check_coder(
coders._TimerCoder(coders.StrUtf8Coder(), coders.GlobalWindowCoder()),
*[
userstate.Timer(
user_key="key",
dynamic_timer_tag="tag",
windows=(GlobalWindow(), ),
clear_bit=True,
fire_timestamp=None,
hold_timestamp=None,
paneinfo=None),
userstate.Timer(
user_key="key",
dynamic_timer_tag="tag",
windows=(GlobalWindow(), ),
clear_bit=False,
fire_timestamp=timestamp.Timestamp.of(123),
hold_timestamp=timestamp.Timestamp.of(456),
paneinfo=windowed_value.PANE_INFO_UNKNOWN)
])
def test_tuple_coder(self):
kv_coder = coders.TupleCoder((coders.VarIntCoder(), coders.BytesCoder()))
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:pair',
'is_pair_like': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.BytesCoder().as_cloud_object()
],
},
kv_coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x04abc', kv_coder.encode((4, b'abc')))
# Test unnested
self.check_coder(kv_coder, (1, b'a'), (-2, b'a' * 100), (300, b'abc\0' * 5))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.TupleCoder((coders.PickleCoder(), coders.VarIntCoder())),
coders.StrUtf8Coder(),
coders.BooleanCoder())), ((1, 2), 'a', True),
((-2, 5), u'a\u0101' * 100, False), ((300, 1), 'abc\0' * 5, True))
def test_tuple_sequence_coder(self):
int_tuple_coder = coders.TupleSequenceCoder(coders.VarIntCoder())
self.check_coder(int_tuple_coder, (1, -1, 0), (), tuple(range(1000)))
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), int_tuple_coder)),
(1, (1, 2, 3)))
def test_base64_pickle_coder(self):
self.check_coder(coders.Base64PickleCoder(), 'a', 1, 1.5, (1, 2, 3))
def test_utf8_coder(self):
self.check_coder(coders.StrUtf8Coder(), 'a', u'ab\u00FF', u'\u0101\0')
def test_iterable_coder(self):
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:stream',
'is_stream_like': True,
'component_encodings': [coders.VarIntCoder().as_cloud_object()]
},
iterable_coder.as_cloud_object())
# Test unnested
self.check_coder(iterable_coder, [1], [-1, 0, 100])
# Test nested
self.check_coder(
coders.TupleCoder(
(coders.VarIntCoder(), coders.IterableCoder(coders.VarIntCoder()))),
(1, [1, 2, 3]))
def test_iterable_coder_unknown_length(self):
# Empty
self._test_iterable_coder_of_unknown_length(0)
# Single element
self._test_iterable_coder_of_unknown_length(1)
# Multiple elements
self._test_iterable_coder_of_unknown_length(100)
# Multiple elements with underlying stream buffer overflow.
self._test_iterable_coder_of_unknown_length(80000)
def _test_iterable_coder_of_unknown_length(self, count):
def iter_generator(count):
for i in range(count):
yield i
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
self.assertCountEqual(
list(iter_generator(count)),
iterable_coder.decode(iterable_coder.encode(iter_generator(count))))
def test_windowedvalue_coder_paneinfo(self):
coder = coders.WindowedValueCoder(
coders.VarIntCoder(), coders.GlobalWindowCoder())
test_paneinfo_values = [
windowed_value.PANE_INFO_UNKNOWN,
windowed_value.PaneInfo(
True, True, windowed_value.PaneInfoTiming.EARLY, 0, -1),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 0, 0),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 10, 0),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 0, 23),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 12, 23),
windowed_value.PaneInfo(
False, False, windowed_value.PaneInfoTiming.LATE, 0, 123),
]
test_values = [
windowed_value.WindowedValue(123, 234, (GlobalWindow(), ), p)
for p in test_paneinfo_values
]
# Test unnested.
self.check_coder(
coder,
windowed_value.WindowedValue(
123, 234, (GlobalWindow(), ), windowed_value.PANE_INFO_UNKNOWN))
for value in test_values:
self.check_coder(coder, value)
# Test nested.
for value1 in test_values:
for value2 in test_values:
self.check_coder(coders.TupleCoder((coder, coder)), (value1, value2))
def test_windowed_value_coder(self):
coder = coders.WindowedValueCoder(
coders.VarIntCoder(), coders.GlobalWindowCoder())
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:windowed_value',
'is_wrapper': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.GlobalWindowCoder().as_cloud_object(),
],
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(
b'\x7f\xdf;dZ\x1c\xac\t\x00\x00\x00\x01\x0f\x01',
coder.encode(window.GlobalWindows.windowed_value(1)))
# Test decoding large timestamp
self.assertEqual(
coder.decode(b'\x7f\xdf;dZ\x1c\xac\x08\x00\x00\x00\x01\x0f\x00'),
windowed_value.create(0, MIN_TIMESTAMP.micros, (GlobalWindow(), )))
# Test unnested
self.check_coder(
coders.WindowedValueCoder(coders.VarIntCoder()),
windowed_value.WindowedValue(3, -100, ()),
windowed_value.WindowedValue(-1, 100, (1, 2, 3)))
# Test Global Window
self.check_coder(
coders.WindowedValueCoder(
coders.VarIntCoder(), coders.GlobalWindowCoder()),
window.GlobalWindows.windowed_value(1))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.WindowedValueCoder(coders.FloatCoder()),
coders.WindowedValueCoder(coders.StrUtf8Coder()))),
(
windowed_value.WindowedValue(1.5, 0, ()),
windowed_value.WindowedValue("abc", 10, ('window', ))))
def test_param_windowed_value_coder(self):
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils.windowed_value import PaneInfo
wv = windowed_value.create(
b'',
# Milliseconds to microseconds
1000 * 1000,
(IntervalWindow(11, 21), ),
PaneInfo(True, False, 1, 2, 3))
windowed_value_coder = coders.WindowedValueCoder(
coders.BytesCoder(), coders.IntervalWindowCoder())
payload = windowed_value_coder.encode(wv)
coder = coders.ParamWindowedValueCoder(
payload, [coders.VarIntCoder(), coders.IntervalWindowCoder()])
# Test binary representation
self.assertEqual(
b'\x01', coder.encode(window.GlobalWindows.windowed_value(1)))
# Test unnested
self.check_coder(
coders.ParamWindowedValueCoder(
payload, [coders.VarIntCoder(), coders.IntervalWindowCoder()]),
windowed_value.WindowedValue(
3,
1, (window.IntervalWindow(11, 21), ),
PaneInfo(True, False, 1, 2, 3)),
windowed_value.WindowedValue(
1,
1, (window.IntervalWindow(11, 21), ),
PaneInfo(True, False, 1, 2, 3)))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.ParamWindowedValueCoder(
payload, [coders.FloatCoder(), coders.IntervalWindowCoder()]),
coders.ParamWindowedValueCoder(
payload,
[coders.StrUtf8Coder(), coders.IntervalWindowCoder()]))),
(
windowed_value.WindowedValue(
1.5,
1, (window.IntervalWindow(11, 21), ),
PaneInfo(True, False, 1, 2, 3)),
windowed_value.WindowedValue(
"abc",
1, (window.IntervalWindow(11, 21), ),
PaneInfo(True, False, 1, 2, 3))))
def test_proto_coder(self):
    # For instructions on how these test proto messages were generated,
# see coders_test.py
ma = test_message.MessageA()
mab = ma.field2.add()
mab.field1 = True
ma.field1 = u'hello world'
mb = test_message.MessageA()
mb.field1 = u'beam'
proto_coder = coders.ProtoCoder(ma.__class__)
self.check_coder(proto_coder, ma)
self.check_coder(
coders.TupleCoder((proto_coder, coders.BytesCoder())), (ma, b'a'),
(mb, b'b'))
def test_global_window_coder(self):
coder = coders.GlobalWindowCoder()
value = window.GlobalWindow()
# Verify cloud object representation
self.assertEqual({'@type': 'kind:global_window'}, coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'', coder.encode(value))
self.assertEqual(value, coder.decode(b''))
# Test unnested
self.check_coder(coder, value)
# Test nested
self.check_coder(coders.TupleCoder((coder, coder)), (value, value))
def test_length_prefix_coder(self):
coder = coders.LengthPrefixCoder(coders.BytesCoder())
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:length_prefix',
'component_encodings': [coders.BytesCoder().as_cloud_object()]
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x00', coder.encode(b''))
self.assertEqual(b'\x01a', coder.encode(b'a'))
self.assertEqual(b'\x02bc', coder.encode(b'bc'))
self.assertEqual(b'\xff\x7f' + b'z' * 16383, coder.encode(b'z' * 16383))
# Test unnested
self.check_coder(coder, b'', b'a', b'bc', b'def')
# Test nested
self.check_coder(
coders.TupleCoder((coder, coder)), (b'', b'a'), (b'bc', b'def'))
def test_nested_observables(self):
class FakeObservableIterator(observable.ObservableMixin):
def __iter__(self):
return iter([1, 2, 3])
# Coder for elements from the observable iterator.
elem_coder = coders.VarIntCoder()
iter_coder = coders.TupleSequenceCoder(elem_coder)
# Test nested WindowedValue observable.
coder = coders.WindowedValueCoder(iter_coder)
observ = FakeObservableIterator()
value = windowed_value.WindowedValue(observ, 0, ())
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(value)[1],
[(observ, elem_coder.get_impl())])
# Test nested tuple observable.
coder = coders.TupleCoder((coders.StrUtf8Coder(), iter_coder))
value = (u'123', observ)
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(value)[1],
[(observ, elem_coder.get_impl())])
def test_state_backed_iterable_coder(self):
# pylint: disable=global-variable-undefined
# required for pickling by reference
global state
state = {}
def iterable_state_write(values, element_coder_impl):
token = b'state_token_%d' % len(state)
state[token] = [element_coder_impl.encode(e) for e in values]
return token
def iterable_state_read(token, element_coder_impl):
return [element_coder_impl.decode(s) for s in state[token]]
coder = coders.StateBackedIterableCoder(
coders.VarIntCoder(),
read_state=iterable_state_read,
write_state=iterable_state_write,
write_state_threshold=1)
context = pipeline_context.PipelineContext(
iterable_state_read=iterable_state_read,
iterable_state_write=iterable_state_write)
self.check_coder(
coder, [1, 2, 3], context=context, test_size_estimation=False)
# Ensure that state was actually used.
self.assertNotEqual(state, {})
self.check_coder(
coders.TupleCoder((coder, coder)), ([1], [2, 3]),
context=context,
test_size_estimation=False)
def test_nullable_coder(self):
self.check_coder(coders.NullableCoder(coders.VarIntCoder()), None, 2 * 64)
def test_map_coder(self):
self.check_coder(
coders.MapCoder(coders.VarIntCoder(), coders.StrUtf8Coder()), {
1: "one", 300: "three hundred"
}, {}, {i: str(i)
for i in range(5000)})
def test_sharded_key_coder(self):
key_and_coders = [(b'', b'\x00', coders.BytesCoder()),
(b'key', b'\x03key', coders.BytesCoder()),
('key', b'\03\x6b\x65\x79', coders.StrUtf8Coder()),
(('k', 1),
b'\x01\x6b\x01',
coders.TupleCoder(
(coders.StrUtf8Coder(), coders.VarIntCoder())))]
for key, bytes_repr, key_coder in key_and_coders:
coder = coders.ShardedKeyCoder(key_coder)
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:sharded_key',
'component_encodings': [key_coder.as_cloud_object()]
},
coder.as_cloud_object())
self.assertEqual(b'\x00' + bytes_repr, coder.encode(ShardedKey(key, b'')))
self.assertEqual(
b'\x03123' + bytes_repr, coder.encode(ShardedKey(key, b'123')))
# Test unnested
self.check_coder(coder, ShardedKey(key, b''))
self.check_coder(coder, ShardedKey(key, b'123'))
# Test type hints
self.assertTrue(
isinstance(
coder.to_type_hint(), sharded_key_type.ShardedKeyTypeConstraint))
key_type = coder.to_type_hint().key_type
if isinstance(key_type, typehints.TupleConstraint):
self.assertEqual(key_type.tuple_types, (type(key[0]), type(key[1])))
else:
self.assertEqual(key_type, type(key))
self.assertEqual(
coders.ShardedKeyCoder.from_type_hint(
coder.to_type_hint(), typecoders.CoderRegistry()),
coder)
for other_key, _, other_key_coder in key_and_coders:
other_coder = coders.ShardedKeyCoder(other_key_coder)
# Test nested
self.check_coder(
coders.TupleCoder((coder, other_coder)),
(ShardedKey(key, b''), ShardedKey(other_key, b'')))
self.check_coder(
coders.TupleCoder((coder, other_coder)),
(ShardedKey(key, b'123'), ShardedKey(other_key, b'')))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 35.667674 | 80 | 0.652507 |
9778c920c0ef45cdf45f90df31e2eec510357a6c | 814 | py | Python |
codenerix_products/migrations/0009_auto_20180201_1137.py | centrologic/codenerix_products | d482ffe5a3a2d68ccc00170b8f65ce5fe7c2dff8 | ["Apache-2.0"] | 2 | 2018-09-03T08:57:25.000Z | 2020-11-03T03:42:31.000Z |
codenerix_products/migrations/0009_auto_20180201_1137.py | centrologic/codenerix_products | d482ffe5a3a2d68ccc00170b8f65ce5fe7c2dff8 | ["Apache-2.0"] | 1 | 2017-05-03T08:45:37.000Z | 2017-05-03T08:45:37.000Z |
codenerix_products/migrations/0009_auto_20180201_1137.py | centrologic/codenerix_products | d482ffe5a3a2d68ccc00170b8f65ce5fe7c2dff8 | ["Apache-2.0"] | 1 | 2018-05-22T10:00:48.000Z | 2018-05-22T10:00:48.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-01 10:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('codenerix_products', '0008_auto_20180126_1711'),
]
operations = [
migrations.AlterField(
model_name='productunique',
name='box',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_unique', to='codenerix_storages.StorageBox', verbose_name='Box'),
),
migrations.AlterField(
model_name='productunique',
name='stock_original',
field=models.FloatField(default=0, verbose_name='Stock original'),
),
]
| 30.148148 | 169 | 0.657248 |
987aafff4acfb055e64aa7484b46f2871b507024 | 788 | py | Python |
src/nexuscli/exception.py | TimurPlusPlus/nexus3-cli | 5c4dc4393ccdc07ad3769e8bb44f3fb20857ffa8 | ["MIT"] | null | null | null |
src/nexuscli/exception.py | TimurPlusPlus/nexus3-cli | 5c4dc4393ccdc07ad3769e8bb44f3fb20857ffa8 | ["MIT"] | null | null | null |
src/nexuscli/exception.py | TimurPlusPlus/nexus3-cli | 5c4dc4393ccdc07ad3769e8bb44f3fb20857ffa8 | ["MIT"] | null | null | null |
class NexusClientAPIError(Exception):
"""Unexpected response from Nexus service."""
pass
class NexusClientInvalidCredentials(Exception):
"""
Login credentials not accepted by Nexus service. Usually the result of a
HTTP 401 response.
"""
pass
class NexusClientInvalidRepositoryPath(Exception):
"""
Used when an operation against the Nexus service uses an invalid or
non-existent path.
"""
pass
class NexusClientInvalidRepository(Exception):
"""The given repository does not exist in Nexus."""
pass
class NexusClientCreateRepositoryError(Exception):
"""Used when a repository creation operation in Nexus fails."""
pass
class DownloadError(Exception):
"""Error retrieving artefact from Nexus service."""
pass
| 22.514286 | 76 | 0.717005 |
69f47cb69536860080a6311496837e20581840b1 | 3,383 | py | Python |
alipay/aop/api/response/AlipayEbppJfexportInstbillQueryResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z |
alipay/aop/api/response/AlipayEbppJfexportInstbillQueryResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z |
alipay/aop/api/response/AlipayEbppJfexportInstbillQueryResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.JfExportInstBillModel import JfExportInstBillModel
class AlipayEbppJfexportInstbillQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEbppJfexportInstbillQueryResponse, self).__init__()
self._bill_key = None
self._biz_type = None
self._cache_key = None
self._charge_inst = None
self._charge_mode = None
self._extend_field = None
self._inst_bills = None
self._owner_name = None
self._sub_biz_type = None
@property
def bill_key(self):
return self._bill_key
@bill_key.setter
def bill_key(self, value):
self._bill_key = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def cache_key(self):
return self._cache_key
@cache_key.setter
def cache_key(self, value):
self._cache_key = value
@property
def charge_inst(self):
return self._charge_inst
@charge_inst.setter
def charge_inst(self, value):
self._charge_inst = value
@property
def charge_mode(self):
return self._charge_mode
@charge_mode.setter
def charge_mode(self, value):
self._charge_mode = value
@property
def extend_field(self):
return self._extend_field
@extend_field.setter
def extend_field(self, value):
self._extend_field = value
@property
def inst_bills(self):
return self._inst_bills
@inst_bills.setter
def inst_bills(self, value):
if isinstance(value, list):
self._inst_bills = list()
for i in value:
if isinstance(i, JfExportInstBillModel):
self._inst_bills.append(i)
else:
self._inst_bills.append(JfExportInstBillModel.from_alipay_dict(i))
@property
def owner_name(self):
return self._owner_name
@owner_name.setter
def owner_name(self, value):
self._owner_name = value
@property
def sub_biz_type(self):
return self._sub_biz_type
@sub_biz_type.setter
def sub_biz_type(self, value):
self._sub_biz_type = value
def parse_response_content(self, response_content):
response = super(AlipayEbppJfexportInstbillQueryResponse, self).parse_response_content(response_content)
if 'bill_key' in response:
self.bill_key = response['bill_key']
if 'biz_type' in response:
self.biz_type = response['biz_type']
if 'cache_key' in response:
self.cache_key = response['cache_key']
if 'charge_inst' in response:
self.charge_inst = response['charge_inst']
if 'charge_mode' in response:
self.charge_mode = response['charge_mode']
if 'extend_field' in response:
self.extend_field = response['extend_field']
if 'inst_bills' in response:
self.inst_bills = response['inst_bills']
if 'owner_name' in response:
self.owner_name = response['owner_name']
if 'sub_biz_type' in response:
self.sub_biz_type = response['sub_biz_type']
| 29.938053 | 112 | 0.645285 |
9ec9843ae23b8ce9a1e33f44dcd31ccaaded593a | 9,869 | py | Python |
oletools/common/log_helper/log_helper.py | mengualp/oletools | bf83ed4037906ad4097f1eb7968ef0274f4694f8 | ["BSD-2-Clause"] | null | null | null |
oletools/common/log_helper/log_helper.py | mengualp/oletools | bf83ed4037906ad4097f1eb7968ef0274f4694f8 | ["BSD-2-Clause"] | null | null | null |
oletools/common/log_helper/log_helper.py | mengualp/oletools | bf83ed4037906ad4097f1eb7968ef0274f4694f8 | ["BSD-2-Clause"] | null | null | null |
"""
log_helper.py
General logging helpers
Use as follows:
# at the start of your file:
# import logging <-- replace this with next line
from oletools.common.log_helper import log_helper
logger = log_helper.get_or_create_silent_logger("module_name")
def enable_logging():
'''Enable logging in this module; for use by importing scripts'''
logger.setLevel(log_helper.NOTSET)
imported_oletool_module.enable_logging()
other_imported_oletool_module.enable_logging()
# ... your code; use logger instead of logging ...
def main():
log_helper.enable_logging(level=...) # instead of logging.basicConfig
# ... your main code ...
log_helper.end_logging()
.. codeauthor:: Intra2net AG <info@intra2net>, Philippe Lagadec
"""
# === LICENSE =================================================================
# oletools is copyright (c) 2012-2021, Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# CHANGELOG:
# 2017-12-07 v0.01 CH: - first version
# 2018-02-05 v0.02 SA: - fixed log level selection and reformatted code
# 2018-02-06 v0.03 SA: - refactored code to deal with NullHandlers
# 2018-02-07 v0.04 SA: - fixed control of handlers propagation
# 2018-04-23 v0.05 SA: - refactored the whole logger to use an OOP approach
# 2021-05-17 v0.60 PL: - added default values for enable_logging parameters
# -----------------------------------------------------------------------------
# TODO:
from __future__ import print_function
from ._json_formatter import JsonFormatter
from ._logger_adapter import OletoolsLoggerAdapter
from . import _root_logger_wrapper
from ..io_encoding import ensure_stdout_handles_unicode
import logging
import sys
LOG_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
#: provide this constant to modules, so they do not have to import
#: :py:mod:`logging` for themselves just for this one constant.
NOTSET = logging.NOTSET
DEFAULT_LOGGER_NAME = 'oletools'
DEFAULT_MESSAGE_FORMAT = '%(levelname)-8s %(message)s'
class LogHelper:
"""
Single helper class that creates and remembers loggers.
"""
#: for convenience: here again (see also :py:data:`log_helper.NOTSET`)
NOTSET = logging.NOTSET
def __init__(self):
self._all_names = set() # set so we do not have duplicates
self._use_json = False
self._is_enabled = False
self._target_stream = None
def get_or_create_silent_logger(self, name=DEFAULT_LOGGER_NAME, level=logging.CRITICAL + 1):
"""
Get a logger or create one if it doesn't exist, setting a NullHandler
as the handler (to avoid printing to the console).
By default we also use a higher logging level so every message will
be ignored.
This will prevent oletools from logging unnecessarily when being imported
from external tools.
"""
return self._get_or_create_logger(name, level, logging.NullHandler())
def enable_logging(self, use_json=False, level='warning', log_format=DEFAULT_MESSAGE_FORMAT, stream=None,
other_logger_has_first_line=False):
"""
This function initializes the root logger and enables logging.
We set the level of the root logger to the one passed by calling logging.basicConfig.
We also set the level of every logger we created to 0 (logging.NOTSET), meaning that
the level of the root logger will be used to tell if messages should be logged.
Additionally, since our loggers use the NullHandler, they won't log anything themselves,
but due to having propagation enabled they will pass messages to the root logger,
which in turn will log to the stream set in this function.
Since the root logger is the one doing the work, when using JSON we set its formatter
so that every message logged is JSON-compatible.
If other code also creates json output, all items should be pre-pended
with a comma like the `JsonFormatter` does. Except the first; use param
`other_logger_has_first_line` to clarify whether our logger or the
other code will produce the first json item.
"""
if self._is_enabled:
raise ValueError('re-enabling logging. Not sure whether that is ok...')
if stream is None:
self.target_stream = sys.stdout
else:
self.target_stream = stream
if self.target_stream == sys.stdout:
ensure_stdout_handles_unicode()
log_level = LOG_LEVELS[level]
logging.basicConfig(level=log_level, format=log_format,
stream=self.target_stream)
self._is_enabled = True
self._use_json = use_json
sys.excepthook = self._get_except_hook(sys.excepthook)
# since there could be loggers already created we go through all of them
# and set their levels to 0 so they will use the root logger's level
for name in self._all_names:
logger = self.get_or_create_silent_logger(name)
self._set_logger_level(logger, logging.NOTSET)
# add a JSON formatter to the root logger, which will be used by every logger
if self._use_json:
_root_logger_wrapper.set_formatter(JsonFormatter(other_logger_has_first_line))
print('[', file=self.target_stream)
def end_logging(self):
"""
Must be called at the end of the main function if the caller wants
json-compatible output
"""
if not self._is_enabled:
return
self._is_enabled = False
# end logging
self._all_names = set()
logging.shutdown()
# end json list
if self._use_json:
print(']', file=self.target_stream)
self._use_json = False
def _get_except_hook(self, old_hook):
"""
Global hook for exceptions so we can always end logging.
We wrap any hook currently set to avoid overwriting global hooks set by oletools.
Note that this is only called by enable_logging, which in turn is called by
the main() function in oletools' scripts. When scripts are being imported this
code won't execute and won't affect global hooks.
"""
def hook(exctype, value, traceback):
self.end_logging()
old_hook(exctype, value, traceback)
return hook
def _get_or_create_logger(self, name, level, handler=None):
"""
Get or create a new logger. This newly created logger will have the
handler and level that was passed, but if it already exists it's not changed.
We also wrap the logger in an adapter so we can easily extend its functionality.
"""
# logging.getLogger creates a logger if it doesn't exist,
# so we need to check before calling it
if handler and not self._log_exists(name):
logger = logging.getLogger(name)
logger.addHandler(handler)
self._set_logger_level(logger, level)
else:
logger = logging.getLogger(name)
# Keep track of every logger we created so we can easily change
# their levels whenever needed
self._all_names.add(name)
adapted_logger = OletoolsLoggerAdapter(logger, None)
adapted_logger.set_json_enabled_function(lambda: self._use_json)
return adapted_logger
@staticmethod
def _set_logger_level(logger, level):
"""
If the logging is already initialized, we set the level of our logger
to 0, meaning that it will reuse the level of the root logger.
That means that if the root logger level changes, we will keep using
its level and not logging unnecessarily.
"""
# if this log was wrapped, unwrap it to set the level
if isinstance(logger, OletoolsLoggerAdapter):
logger = logger.logger
if _root_logger_wrapper.is_logging_initialized():
logger.setLevel(logging.NOTSET)
else:
logger.setLevel(level)
@staticmethod
def _log_exists(name):
"""
We check the log manager instead of our global _all_names variable
since the logger could have been created outside of the helper
"""
return name in logging.Logger.manager.loggerDict
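# --- Illustrative usage (editor's sketch, not part of the original module). ---
# It follows the pattern described in the module docstring, but drives a
# LogHelper instance directly; the instance and logger names below are
# assumptions for illustration only.
def _example_usage():
    helper = LogHelper()
    logger = helper.get_or_create_silent_logger('oletools.example')
    logger.warning('dropped: logging is still silent')
    helper.enable_logging(use_json=False, level='debug')
    logger.debug('visible: handled by the root logger configured above')
    helper.end_logging()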
| 39.794355 | 109 | 0.672206 |
dea45fe9730c8929cc11bddbecc0d318ce160945 | 2,701 | py | Python |
squareconnect/models/measurement_unit_area.py | shaminmeerankutty/connect-python-sdk | 524c8fe344bc3c0340833984970a07d519c4f5be | ["Apache-2.0"] | 53 | 2016-08-06T17:12:16.000Z | 2020-08-02T19:43:58.000Z |
squareconnect/models/measurement_unit_area.py | shaminmeerankutty/connect-python-sdk | 524c8fe344bc3c0340833984970a07d519c4f5be | ["Apache-2.0"] | 32 | 2016-08-19T16:32:30.000Z | 2020-01-14T18:01:37.000Z |
squareconnect/models/measurement_unit_area.py | shaminmeerankutty/connect-python-sdk | 524c8fe344bc3c0340833984970a07d519c4f5be | ["Apache-2.0"] | 45 | 2016-09-05T11:58:09.000Z | 2020-11-15T16:26:41.000Z |
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class MeasurementUnitArea(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
MeasurementUnitArea - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.135417 | 77 | 0.553499 |
c502a0846594c7b19de5a794d5b5c4e0e93e0e17 | 5,737 | py | Python |
launch_testing/launch_testing_examples/launch_testing_examples/check_msgs_launch_test.py | fujitatomoya/examples | 35b181ca912258a6c594bedb846a0077a60fcc53 | ["Apache-2.0"] | 335 | 2015-03-12T10:04:35.000Z | 2022-03-30T00:46:46.000Z |
launch_testing/launch_testing_examples/launch_testing_examples/check_msgs_launch_test.py | AhmedMounir/examples | 35b181ca912258a6c594bedb846a0077a60fcc53 | ["Apache-2.0"] | 264 | 2015-01-15T23:27:55.000Z | 2022-03-31T15:20:10.000Z |
launch_testing/launch_testing_examples/launch_testing_examples/check_msgs_launch_test.py | AhmedMounir/examples | 35b181ca912258a6c594bedb846a0077a60fcc53 | ["Apache-2.0"] | 227 | 2015-05-12T08:01:37.000Z | 2022-03-31T15:07:17.000Z |
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
from threading import Event
from threading import Thread
import unittest
import launch
import launch.actions
import launch_ros.actions
import launch_testing.actions
import launch_testing.markers
import pytest
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from std_msgs.msg import String
@pytest.mark.launch_test
@launch_testing.markers.keep_alive
def generate_test_description():
return launch.LaunchDescription([
launch_ros.actions.Node(
executable='talker',
package='demo_nodes_cpp',
name='demo_node_1'
),
launch_testing.actions.ReadyToTest()
])
class TestFixture(unittest.TestCase):
def test_check_if_msgs_published(self):
with WaitForTopics([('chatter', String)], timeout=5.0):
print('Topic received messages !')
# TODO (adityapande-1995): Move WaitForTopics implementation to launch_testing_ros
# after https://github.com/ros2/rclpy/issues/831 is resolved
class WaitForTopics:
"""
Wait to receive messages on supplied topics.
Example usage:
--------------
from std_msgs.msg import String
# Method 1, using the 'with' keyword
def method_1():
topic_list = [('topic_1', String), ('topic_2', String)]
with WaitForTopics(topic_list, timeout=5.0):
# 'topic_1' and 'topic_2' received at least one message each
print('Given topics are receiving messages !')
# Method 2, calling wait() and shutdown() manually
def method_2():
topic_list = [('topic_1', String), ('topic_2', String)]
wait_for_topics = WaitForTopics(topic_list, timeout=5.0)
assert wait_for_topics.wait()
print('Given topics are receiving messages !')
print(wait_for_topics.topics_not_received()) # Should be an empty set
print(wait_for_topics.topics_received()) # Should be {'topic_1', 'topic_2'}
wait_for_topics.shutdown()
"""
def __init__(self, topic_tuples, timeout=5.0):
self.topic_tuples = topic_tuples
self.timeout = timeout
self.__ros_context = rclpy.Context()
rclpy.init(context=self.__ros_context)
self.__ros_executor = SingleThreadedExecutor(context=self.__ros_context)
self._prepare_ros_node()
# Start spinning
self.__running = True
self.__ros_spin_thread = Thread(target=self._spin_function)
self.__ros_spin_thread.start()
def _prepare_ros_node(self):
node_name = '_test_node_' +\
''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
self.__ros_node = _WaitForTopicsNode(name=node_name, node_context=self.__ros_context)
self.__ros_executor.add_node(self.__ros_node)
def _spin_function(self):
while self.__running:
self.__ros_executor.spin_once(1.0)
def wait(self):
self.__ros_node.start_subscribers(self.topic_tuples)
return self.__ros_node.msg_event_object.wait(self.timeout)
def shutdown(self):
self.__running = False
self.__ros_spin_thread.join()
self.__ros_node.destroy_node()
rclpy.shutdown(context=self.__ros_context)
def topics_received(self):
"""Topics that received at least one message."""
return self.__ros_node.received_topics
def topics_not_received(self):
"""Topics that did not receive any messages."""
return self.__ros_node.expected_topics - self.__ros_node.received_topics
def __enter__(self):
if not self.wait():
raise RuntimeError('Did not receive messages on these topics: ',
self.topics_not_received())
return self
def __exit__(self, exep_type, exep_value, trace):
if exep_type is not None:
            raise Exception('Exception occurred, value: ', exep_value)
self.shutdown()
class _WaitForTopicsNode(Node):
"""Internal node used for subscribing to a set of topics."""
def __init__(self, name='test_node', node_context=None):
super().__init__(node_name=name, context=node_context)
self.msg_event_object = Event()
def start_subscribers(self, topic_tuples):
self.subscriber_list = []
self.expected_topics = {name for name, _ in topic_tuples}
self.received_topics = set()
for topic_name, topic_type in topic_tuples:
# Create a subscriber
self.subscriber_list.append(
self.create_subscription(
topic_type,
topic_name,
self.callback_template(topic_name),
10
)
)
def callback_template(self, topic_name):
def topic_callback(data):
if topic_name not in self.received_topics:
self.get_logger().debug('Message received for ' + topic_name)
self.received_topics.add(topic_name)
if self.received_topics == self.expected_topics:
self.msg_event_object.set()
return topic_callback
| 34.560241 | 93 | 0.672128 |
3063a4f663f0533d2f82019182febf763afd99c9 | 1,445 | py | Python |
polarion/base/custom_fields.py | anjalizope/python-polarion | 827fe1da2e07c1833f5cb6dab740d58c2cd97483 | ["MIT"] | 10 | 2021-03-19T07:36:57.000Z | 2022-02-15T12:28:49.000Z |
polarion/base/custom_fields.py | anjalizope/python-polarion | 827fe1da2e07c1833f5cb6dab740d58c2cd97483 | ["MIT"] | 29 | 2021-03-04T07:18:52.000Z | 2022-03-31T07:13:51.000Z |
polarion/base/custom_fields.py | anjalizope/python-polarion | 827fe1da2e07c1833f5cb6dab740d58c2cd97483 | ["MIT"] | 7 | 2021-03-04T07:15:02.000Z | 2022-03-29T15:09:13.000Z |
from abc import ABC
from polarion.base.polarion_object import PolarionObject
class CustomFields(PolarionObject, ABC):
def __init__(self, polarion, project, _id=None, uri=None):
super().__init__(polarion, project, _id, uri)
self.customFields = None
def isCustomFieldAllowed(self, key):
raise NotImplementedError
def setCustomField(self, key, value):
"""
Set the custom field 'key' to the value
:param key: custom field key
:param value: custom field value
:return: None
"""
if not self.isCustomFieldAllowed(key):
raise Exception(f"key {key} is not allowed for this workitem")
if self.customFields is None:
# nothing exists, create a custom field structure
self.customFields = self._polarion.ArrayOfCustomType()
self.customFields.Custom.append(self._polarion.CustomType(key=key, value=value))
else:
custom_field = next(
(custom_field for custom_field in self.customFields.Custom if custom_field["key"] == key), None)
if custom_field is not None:
# custom field is there and we can update the value
custom_field.value = value
else:
# custom field is not there, add it.
self.customFields.Custom.append(self._polarion.CustomType(key=key, value=value))
self.save()
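# --- Illustrative subclass (editor's sketch, not part of the original module). ---
# CustomFields is abstract: concrete Polarion objects override isCustomFieldAllowed.
# The field whitelist, ids and values below are made-up examples.
class _ExampleItem(CustomFields):
    _allowed_keys = {'severity', 'foundIn'}

    def isCustomFieldAllowed(self, key):
        return key in self._allowed_keys

# item = _ExampleItem(polarion, project, _id='EX-1')  # requires a live Polarion client
# item.setCustomField('severity', 'blocker')          # creates or updates the custom field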
| 38.026316 | 112 | 0.631142 |
0bb40fff6befcec8a15488f362554113302a34fd | 3,904 | py | Python |
pushmanager/servlets/pickmerequest.py | Mango-J/pushmanager | 1fcae14de463f6dcd6a847aa45d67613757cba50 | ["Apache-2.0"] | null | null | null |
pushmanager/servlets/pickmerequest.py | Mango-J/pushmanager | 1fcae14de463f6dcd6a847aa45d67613757cba50 | ["Apache-2.0"] | 2 | 2015-02-27T13:56:01.000Z | 2015-02-27T13:58:27.000Z |
pushmanager/servlets/pickmerequest.py | rockdog/pushmanager | 5b5b4831da4bc9e660ca1d85e3213a7030919772 | ["Apache-2.0"] | null | null | null |
import sqlalchemy as SA
import pushmanager.core.db as db
import pushmanager.core.util
from pushmanager.core.git import GitQueue
from pushmanager.core.git import GitTaskAction
from pushmanager.core.requesthandler import RequestHandler
class PickMeRequestServlet(RequestHandler):
def post(self):
if not self.current_user:
return self.send_error(403)
self.pushid = pushmanager.core.util.get_int_arg(self.request, 'push')
self.request_ids = self.request.arguments.get('request', [])
db.execute_cb(db.push_pushes.select().where(db.push_pushes.c.id == self.pushid), self.on_push_select)
def on_push_select(self, success, db_results):
if not success or not db_results:
return self.send_error(500)
pushrow = db_results.fetchone()
if not pushrow:
return self.send_error(500)
if pushrow[db.push_pushes.c.state] != 'accepting':
return self.send_error(403)
insert_queries = [
db.push_pushcontents.insert({
'request': int(i),
'push': self.pushid
}) for i in self.request_ids
]
update_query = db.push_requests.update().where(SA.and_(
db.push_requests.c.id.in_(self.request_ids),
db.push_requests.c.state == 'requested',
)).values({'state': 'pickme'})
request_query = db.push_requests.select().where(
db.push_requests.c.id.in_(self.request_ids))
condition_query = SA.select(
[db.push_pushes, db.push_pushcontents],
SA.and_(
db.push_pushcontents.c.request.in_(self.request_ids),
db.push_pushes.c.id == db.push_pushcontents.c.push,
db.push_pushes.c.state != 'discarded'
)
)
def condition_fn(db_results):
return db_results.fetchall() == []
db.execute_transaction_cb(
insert_queries + [update_query, request_query],
self.on_db_complete,
condition=(condition_query, condition_fn)
)
# allow both GET and POST
get = post
def on_db_complete(self, success, db_results):
self.check_db_results(success, db_results)
for request_id in self.request_ids:
GitQueue.enqueue_request(
GitTaskAction.TEST_PICKME_CONFLICT,
request_id,
pushmanager_url=self.get_base_url()
)
class UnpickMeRequestServlet(RequestHandler):
def post(self):
if not self.current_user:
return self.send_error(403)
self.pushid = pushmanager.core.util.get_int_arg(self.request, 'push')
self.request_id = self.request.arguments.get('request', [None])[0]
delete_query = db.push_pushcontents.delete().where(
SA.exists([1], SA.and_(
db.push_pushcontents.c.request == self.request_id,
db.push_pushcontents.c.push == self.pushid,
db.push_requests.c.id == db.push_pushcontents.c.request,
db.push_requests.c.state == 'pickme',
)))
update_query = db.push_requests.update().where(SA.and_(
db.push_requests.c.id == self.request_id,
db.push_requests.c.state == 'pickme',
)).values({'state': 'requested'})
db.execute_transaction_cb([delete_query, update_query], self.on_db_complete)
# allow both GET and POST
get = post
def on_db_complete(self, success, db_results):
self.check_db_results(success, db_results)
# Re-check pickmes that are marked as conflicting, in case this was the pickme
# that they conflicted against.
GitQueue.enqueue_request(
GitTaskAction.TEST_CONFLICTING_PICKMES,
self.pushid,
pushmanager_url=self.get_base_url()
)
| 35.490909 | 109 | 0.620133 |
75c9f3e7ca63fd02ceee5e064b324250a43fb89a | 14,951 | py | Python |
Packs/ApiModules/Scripts/CSVFeedApiModule/CSVFeedApiModule.py | hoxhunt/content | b3a590ef73dfc3b0830ff8ce38e6e7f63292a290 | ["MIT"] | 1 | 2021-08-07T00:21:58.000Z | 2021-08-07T00:21:58.000Z |
Packs/ApiModules/Scripts/CSVFeedApiModule/CSVFeedApiModule.py | kulaibrown/content | bd8914148c59f8151f9a9527020a5386af4de451 | ["MIT"] | 1 | 2022-01-19T13:41:51.000Z | 2022-01-19T15:00:05.000Z |
Packs/ApiModules/Scripts/CSVFeedApiModule/CSVFeedApiModule.py | kulaibrown/content | bd8914148c59f8151f9a9527020a5386af4de451 | ["MIT"] | 1 | 2021-01-05T12:20:30.000Z | 2021-01-05T12:20:30.000Z |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import csv
import gzip
import urllib3
from dateutil.parser import parse
from typing import Optional, Pattern, Dict, Any, Tuple, Union, List
# disable insecure warnings
urllib3.disable_warnings()
# Globals
class Client(BaseClient):
def __init__(self, url: str, feed_url_to_config: Optional[Dict[str, dict]] = None, fieldnames: str = '',
insecure: bool = False, credentials: dict = None, ignore_regex: str = None, encoding: str = 'latin-1',
delimiter: str = ',', doublequote: bool = True, escapechar: str = '',
quotechar: str = '"', skipinitialspace: bool = False, polling_timeout: int = 20, proxy: bool = False,
feedTags: Optional[str] = None, tlp_color: Optional[str] = None, value_field: str = 'value', **kwargs):
"""
:param url: URL of the feed.
:param feed_url_to_config: for each URL, a configuration of the feed that contains
If *null* the values in the first row of the file are used as names. Default: *null*
Example:
feed_url_to_config = {
'https://ipstack.com':
{
'fieldnames': ['value'],
'indicator_type': 'IP',
'mapping': {
'Date': 'date' / 'Date': ('date', r'(regex_string)', 'The date is {}')
}
}
}
For the mapping you can use either:
1. 'indicator_field': 'value_from_feed'
2. 'indicator_field': ('value_from_feed', regex_string_extractor, string_formatter)
* regex_string_extractor will extract the first match from the value_from_feed,
Use None to get the full value of the field.
* string_formatter will format the data in your preferred way, Use None to get the extracted field.
3. 'indicator_field': ('value_from_feed', 'field_mapper_function')
* field_mapper_function will accept as an argument 'value_from_feed' and return the data
in your preferred way.
:param fieldnames: list of field names in the file. If *null* the values in the first row of the file are
used as names. Default: *null*
:param insecure: boolean, if *false* feed HTTPS server certificate is verified. Default: *false*
:param credentials: username and password used for basic authentication.
Can be also used as API key header and value by specifying _header in the username field.
:param ignore_regex: python regular expression for lines that should be ignored. Default: *null*
:param encoding: Encoding of the feed, latin-1 by default.
:param delimiter: see `csv Python module
<https://docs.python.org/2/library/csv.html#dialects-and-formatting-parameters>`. Default: ,
:param doublequote: see `csv Python module
<https://docs.python.org/2/library/csv.html#dialects-and-formatting-parameters>`. Default: true
:param escapechar: see `csv Python module
<https://docs.python.org/2/library/csv.html#dialects-and-formatting-parameters>`. Default null
:param quotechar: see `csv Python module
<https://docs.python.org/2/library/csv.html#dialects-and-formatting-parameters>`. Default "
:param skipinitialspace: see `csv Python module
<https://docs.python.org/2/library/csv.html#dialects-and-formatting-parameters>`. Default False
:param polling_timeout: timeout of the polling request in seconds. Default: 20
:param proxy: Sets whether use proxy when sending requests
:param tlp_color: Traffic Light Protocol color.
"""
self.tags: List[str] = argToList(feedTags)
self.tlp_color = tlp_color
self.value_field = value_field
if not credentials:
credentials = {}
auth: Optional[tuple] = None
self.headers = {}
username = credentials.get('identifier', '')
if username.startswith('_header:'):
header_name = username.split(':')[1]
header_value = credentials.get('password', '')
self.headers[header_name] = header_value
else:
password = credentials.get('password', '')
auth = None
if username and password:
auth = (username, password)
super().__init__(base_url=url, proxy=proxy, verify=not insecure, auth=auth)
try:
self.polling_timeout = int(polling_timeout)
except (ValueError, TypeError):
return_error('Please provide an integer value for "Request Timeout"')
self.encoding = encoding
self.ignore_regex: Optional[Pattern] = None
if ignore_regex is not None:
self.ignore_regex = re.compile(ignore_regex)
self.feed_url_to_config: Optional[Dict[str, dict]] = feed_url_to_config
self.fieldnames = argToList(fieldnames)
self.dialect: Dict[str, Any] = {
'delimiter': delimiter,
'doublequote': doublequote,
'escapechar': escapechar,
'quotechar': quotechar,
'skipinitialspace': skipinitialspace
}
def _build_request(self, url):
r = requests.Request(
'GET',
url,
auth=self._auth
)
return r.prepare()
def build_iterator(self, **kwargs):
results = []
urls = self._base_url
if not isinstance(urls, list):
urls = [urls]
for url in urls:
_session = requests.Session()
prepreq = self._build_request(url)
# this is to honour the proxy environment variables
kwargs.update(_session.merge_environment_settings(
prepreq.url,
{}, None, None, None # defaults
))
kwargs['stream'] = True
kwargs['verify'] = self._verify
kwargs['timeout'] = self.polling_timeout
if self.headers:
if 'headers' in kwargs:
kwargs['headers'].update(self.headers)
else:
kwargs['headers'] = self.headers
try:
r = _session.send(prepreq, **kwargs)
except requests.ConnectionError:
raise requests.ConnectionError('Failed to establish a new connection.'
' Please make sure your URL is valid.')
try:
r.raise_for_status()
except Exception:
return_error('Exception in request: {} {}'.format(r.status_code, r.content))
raise
response = self.get_feed_content_divided_to_lines(url, r)
if self.feed_url_to_config:
fieldnames = self.feed_url_to_config.get(url, {}).get('fieldnames', [])
skip_first_line = self.feed_url_to_config.get(url, {}).get('skip_first_line', False)
else:
fieldnames = self.fieldnames
skip_first_line = False
if self.ignore_regex is not None:
response = filter( # type: ignore
lambda x: self.ignore_regex.match(x) is None, # type: ignore
response
)
csvreader = csv.DictReader(
response,
fieldnames=fieldnames,
**self.dialect
)
if skip_first_line:
next(csvreader)
results.append({url: csvreader})
return results
def get_feed_content_divided_to_lines(self, url, raw_response):
"""Fetch feed data and divides its content to lines
Args:
url: Current feed's url.
raw_response: The raw response from the feed's url.
Returns:
List. List of lines from the feed content.
"""
if self.feed_url_to_config and self.feed_url_to_config.get(url, {}).get('is_zipped_file'):  # type: ignore
response_content = gzip.decompress(raw_response.content)
else:
response_content = raw_response.content
return response_content.decode(self.encoding).split('\n')
def determine_indicator_type(indicator_type, default_indicator_type, auto_detect, value):
"""
Detect the indicator type of the given value.
Args:
indicator_type: (str) Indicator type given in the config.
default_indicator_type: Indicator type which was inserted as an integration parameter by the user.
auto_detect: (bool) True if auto detection of the indicator type is wanted.
value: (str) The value whose indicator type we'd like to determine.
Returns:
Str representing the detected indicator type.
"""
if auto_detect:
indicator_type = auto_detect_indicator_type(value)
if not indicator_type:
indicator_type = default_indicator_type
return indicator_type
def module_test_command(client: Client, args):
client.build_iterator()
return 'ok', {}, {}
def date_format_parsing(date_string):
formatted_date = parse(date_string).isoformat()
if "+" in formatted_date:
formatted_date = formatted_date.split('+')[0]
if "." in formatted_date:
formatted_date = formatted_date.split('.')[0]
if not formatted_date.endswith('Z'):
formatted_date = formatted_date + 'Z'
return formatted_date
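# Illustrative behaviour of date_format_parsing above (the input value is assumed):
#   date_format_parsing('2020-01-01 10:00:00.123+02:00') -> '2020-01-01T10:00:00Z'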
def create_fields_mapping(raw_json: Dict[str, Any], mapping: Dict[str, Union[Tuple, str]]):
fields_mapping = {} # type: dict
for key, field in mapping.items():
regex_extractor = None
formatter_string = None
field_mapper_function = None
# case 'value_from_feed', regex_string_extractor, string_formatter
if isinstance(field, tuple) and len(field) == 3:
field, regex_extractor, formatter_string = field
# case 'value_from_feed', 'field_mapper_function'
elif isinstance(field, tuple) and len(field) == 2:
field, field_mapper_function = field
if not raw_json.get(field): # type: ignore
continue
if not regex_extractor:
field_value = raw_json[field] # type: ignore
else:
try:
field_value = re.match(regex_extractor, raw_json[field]).group(1) # type: ignore
except Exception:
field_value = raw_json[field] # type: ignore
field_value = formatter_string.format(field_value) if formatter_string else field_value
field_value = field_mapper_function(field_value) if field_mapper_function else field_value
fields_mapping[key] = field_value
if key in ['firstseenbysource', 'lastseenbysource']:
fields_mapping[key] = date_format_parsing(fields_mapping[key])
return fields_mapping
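# Illustrative call of create_fields_mapping; the field name, regex and formatter are
# placeholders only:
#   create_fields_mapping({'desc': 'hello world'}, {'description': ('desc', r'^(\w+)', '{}!')})
#   -> {'description': 'hello!'}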
def fetch_indicators_command(client: Client, default_indicator_type: str, auto_detect: bool, limit: int = 0, **kwargs):
iterator = client.build_iterator(**kwargs)
indicators = []
config = client.feed_url_to_config or {}
for url_to_reader in iterator:
for url, reader in url_to_reader.items():
mapping = config.get(url, {}).get('mapping', {})
for item in reader:
raw_json = dict(item)
fields_mapping = create_fields_mapping(raw_json, mapping) if mapping else {}
value = item.get(client.value_field) or fields_mapping.get('Value')
if not value and len(item) > 1:
value = next(iter(item.values()))
if value:
raw_json['value'] = value
conf_indicator_type = config.get(url, {}).get('indicator_type')
indicator_type = determine_indicator_type(conf_indicator_type, default_indicator_type, auto_detect,
value)
raw_json['type'] = indicator_type
indicator = {
'value': value,
'type': indicator_type,
'rawJSON': raw_json,
'fields': fields_mapping
}
indicator['fields']['tags'] = client.tags
if client.tlp_color:
indicator['fields']['trafficlightprotocol'] = client.tlp_color
indicators.append(indicator)
# stop and return once the indicator limit has been reached
if limit and len(indicators) >= limit:
return indicators
return indicators
def get_indicators_command(client, args: dict, tags: Optional[List[str]] = None):
if tags is None:
tags = []
itype = args.get('indicator_type', demisto.params().get('indicator_type'))
try:
limit = int(args.get('limit', 50))
except ValueError:
raise ValueError('The limit argument must be a number.')
auto_detect = demisto.params().get('auto_detect_type')
indicators_list = fetch_indicators_command(client, itype, auto_detect, limit)
entry_result = indicators_list[:limit]
hr = tableToMarkdown('Indicators', entry_result, headers=['value', 'type', 'fields'])
return hr, {}, indicators_list
def feed_main(feed_name, params=None, prefix=''):
if not params:
params = {k: v for k, v in demisto.params().items() if v is not None}
handle_proxy()
client = Client(**params)
command = demisto.command()
if command != 'fetch-indicators':
demisto.info('Command being called is {}'.format(command))
if prefix and not prefix.endswith('-'):
prefix += '-'
# Switch case
commands: dict = {
'test-module': module_test_command,
f'{prefix}get-indicators': get_indicators_command
}
try:
if command == 'fetch-indicators':
indicators = fetch_indicators_command(
client,
params.get('indicator_type'),
params.get('auto_detect_type'),
params.get('limit'),
)
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b) # type: ignore
else:
args = demisto.args()
args['feed_name'] = feed_name
readable_output, outputs, raw_response = commands[command](client, args)
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {feed_name} Integration - Encountered an issue with createIndicators' if \
'failed to create' in str(e) else f'Error in {feed_name} Integration [{e}]'
return_error(err_msg)
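# Typical wiring from a concrete feed integration (the feed name and prefix are illustrative):
#   feed_main('Example CSV Feed', prefix='example-csv')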
| 41.530556
| 120
| 0.603839
|
a11b7f38c64e92d0d7cf58dfa3e73896a8aa0c32
| 6,096
|
py
|
Python
|
qa/rpc-tests/proxy_test.py
|
dre060/YAADI
|
cdb07c723f559ce883e33d64bce55b6ee5539142
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/proxy_test.py
|
dre060/YAADI
|
cdb07c723f559ce883e33d64bce55b6ee5539142
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/proxy_test.py
|
dre060/YAADI
|
cdb07c723f559ce883e33d64bce55b6ee5539142
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes.
# This is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost.
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("yaadivj7kcklujarx.onion:5817", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "yaadivj7kcklujarx.onion")
assert_equal(cmd.port, 5817)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.753425
| 145
| 0.652887
|
c83740098d266f77fbed06ef5b4133c090be4202
| 642
|
py
|
Python
|
PyShop/products/migrations/0002_offer.py
|
nibir404/Product-Cart
|
f782740aa28d0d3d946721fe4a8df8c12b22bd99
|
[
"Apache-2.0"
] | null | null | null |
PyShop/products/migrations/0002_offer.py
|
nibir404/Product-Cart
|
f782740aa28d0d3d946721fe4a8df8c12b22bd99
|
[
"Apache-2.0"
] | null | null | null |
PyShop/products/migrations/0002_offer.py
|
nibir404/Product-Cart
|
f782740aa28d0d3d946721fe4a8df8c12b22bd99
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-08 16:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10)),
('description', models.CharField(max_length=255)),
('discount', models.FloatField()),
],
),
]
| 27.913043
| 115
| 0.543614
|
1ebcfcf4fb307858cffe23989680a76d0c92d639
| 2,312
|
py
|
Python
|
python-sockets-tutorial/multiconn-client.py
|
aln787/materials
|
2cca46bbdd898be34b0118be319d50fb985c9eb7
|
[
"MIT"
] | 1
|
2019-11-08T03:11:42.000Z
|
2019-11-08T03:11:42.000Z
|
python-sockets-tutorial/multiconn-client.py
|
saideepthik/materials
|
106a621e57bd2985d68be66b97b511b62bbe22a3
|
[
"MIT"
] | 1
|
2020-07-28T16:53:56.000Z
|
2020-07-28T16:53:56.000Z
|
python-sockets-tutorial/multiconn-client.py
|
saideepthik/materials
|
106a621e57bd2985d68be66b97b511b62bbe22a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import socket
import selectors
import types
sel = selectors.DefaultSelector()
messages = [b'Message 1 from client.', b'Message 2 from client.']
def start_connections(host, port, num_conns):
server_addr = (host, port)
for i in range(0, num_conns):
connid = i + 1
print('starting connection', connid, 'to', server_addr)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.connect_ex(server_addr)
events = selectors.EVENT_READ | selectors.EVENT_WRITE
data = types.SimpleNamespace(connid=connid,
msg_total=sum(len(m) for m in messages),
recv_total=0,
messages=list(messages),
outb=b'')
sel.register(sock, events, data=data)
def service_connection(key, mask):
sock = key.fileobj
data = key.data
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024) # Should be ready to read
if recv_data:
print('received', repr(recv_data), 'from connection', data.connid)
data.recv_total += len(recv_data)
if not recv_data or data.recv_total == data.msg_total:
print('closing connection', data.connid)
sel.unregister(sock)
sock.close()
if mask & selectors.EVENT_WRITE:
if not data.outb and data.messages:
data.outb = data.messages.pop(0)
if data.outb:
print('sending', repr(data.outb), 'to connection', data.connid)
sent = sock.send(data.outb) # Should be ready to write
data.outb = data.outb[sent:]
if len(sys.argv) != 4:
print('usage:', sys.argv[0], '<host> <port> <num_connections>')
sys.exit(1)
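# Example invocation (host and port are placeholders):
#   python3 multiconn-client.py 127.0.0.1 65432 2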
host, port, num_conns = sys.argv[1:4]
start_connections(host, int(port), int(num_conns))
try:
while True:
events = sel.select(timeout=1)
if events:
for key, mask in events:
service_connection(key, mask)
# Check for a socket being monitored to continue.
if not sel.get_map():
break
except KeyboardInterrupt:
print('caught keyboard interrupt, exiting')
finally:
sel.close()
| 33.028571
| 78
| 0.595156
|
2f9fe7e0d51aebfd184c5e04ff24758a2ee0799f
| 3,059
|
py
|
Python
|
api/api.py
|
gboduljak/seam-carving
|
206e72313424531f4ed22c2c1f5fc8cfbbb18920
|
[
"Unlicense"
] | 4
|
2020-06-18T11:47:26.000Z
|
2021-12-13T18:09:00.000Z
|
api/api.py
|
gboduljak/seam-carving
|
206e72313424531f4ed22c2c1f5fc8cfbbb18920
|
[
"Unlicense"
] | null | null | null |
api/api.py
|
gboduljak/seam-carving
|
206e72313424531f4ed22c2c1f5fc8cfbbb18920
|
[
"Unlicense"
] | 1
|
2021-07-24T02:49:43.000Z
|
2021-07-24T02:49:43.000Z
|
from carver import crop, get_energy_image
from flask import Flask, jsonify, request
from flask_cors import CORS
from numpy import array
from PIL import Image
from uuid import uuid4
from os import path
from io import BytesIO
from io import StringIO
import base64
import re
app = Flask(__name__, static_folder='resized-images')
cors = CORS(app)
app_root = path.dirname(path.abspath(__file__))
bad_request_status_code = 201
@app.route('/')
def home():
return 'api running...'
@app.route('/resize', methods=['POST'])
def resize():
errors = validate_resize_request(request)
if len(errors):
return jsonify(errors), bad_request_status_code
image_data = re.sub('^data:image/.+;base64,', '', request.form['image'])
image = Image.open(BytesIO(base64.b64decode(image_data))).convert('RGB')
[crop_rows, crop_cols] = [
int(request.form['crop_rows']),
int(request.form['crop_cols'])
]
processed_images = process_resize_request(image, crop_rows, crop_cols)
return jsonify({
"processed_images": processed_images,
}), 200
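# Illustrative client call for the /resize endpoint above; host, port and the base64 payload
# are placeholders:
#   curl -X POST http://localhost:5000/resize \
#        -F 'image=data:image/png;base64,<BASE64>' -F 'crop_rows=10' -F 'crop_cols=20'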
def validate_resize_request(request):
errors = []
if 'image' not in request.form:
errors.append('Image must be present.')
if 'crop_rows' not in request.form or not is_number(request.form['crop_rows']):
errors.append(
'crop_rows must be present in request and it must be an integer')
if 'crop_cols' not in request.form or not is_number(request.form['crop_cols']):
errors.append(
'crop_cols must be present in request and it must be an integer')
return errors
def process_resize_request(image: Image, crop_rows: int, crop_cols: int):
original_energy_image = get_energy_image(image)
cropped_image, marked_original_image, marked_energy_image = crop(
image.copy(),
image,
original_energy_image.copy(),
crop_rows, crop_cols
)
generated_names = get_images_names()
[
original_name,
cropped_name,
marked_name,
energy_name,
marked_energy_name
] = get_images_paths(generated_names)
marked_original_image.save(marked_name)
image.save(original_name)
cropped_image.save(cropped_name)
original_energy_image.convert('RGB').save(energy_name)
marked_energy_image.convert('RGB').save(marked_energy_name)
return get_images_urls(generated_names)
def get_images_paths(names: list) -> list:
return [path.join(app_root, 'resized-images', name) for name in names]
def get_images_urls(names: list) -> list:
return ['http://{0}/resized-images/{1}'.format(request.host, name) for name in names]
def get_images_names():
image_name = str(uuid4())
return [
'original-{0}.jpeg'.format(image_name),
'cropped-{0}.jpeg'.format(image_name),
'marked-{0}.jpeg'.format(image_name),
'energy-{0}.jpeg'.format(image_name),
'energy-marked-{0}.jpeg'.format(image_name)
]
def is_number(string: str) -> bool:
return string.replace('.', '', 1).isdigit()
| 29.133333
| 89
| 0.686826
|
35c8f17d7d5e565b205386aa67dd3aee19431a61
| 281
|
py
|
Python
|
natlas-server/tests/config.py
|
pryorda/natlas
|
048b5597c0ad77d1a95eadf8859e1122a9eb3d3a
|
[
"Apache-2.0"
] | null | null | null |
natlas-server/tests/config.py
|
pryorda/natlas
|
048b5597c0ad77d1a95eadf8859e1122a9eb3d3a
|
[
"Apache-2.0"
] | 370
|
2020-08-17T06:31:24.000Z
|
2022-03-28T02:09:45.000Z
|
natlas-server/tests/config.py
|
pryorda/natlas
|
048b5597c0ad77d1a95eadf8859e1122a9eb3d3a
|
[
"Apache-2.0"
] | null | null | null |
from config import Config
class TestConfig(Config):
MAIL_FROM = "Test Mail <noreply@example.com>"
MAIL_SERVER = "localhost"
TESTING = True
# This uses an in-memory database
SQLALCHEMY_DATABASE_URI = "sqlite://"
ELASTICSEARCH_URL = "http://localhost:9200"
| 25.545455
| 49
| 0.701068
|
faab3e3956a8b8c8e46ce67ade590cd7f3422eed
| 398
|
py
|
Python
|
deploy/test/kf_producer2.py
|
zhexiao/mnet
|
51e64e6c5181702f90d4f68efb5e4ba8f20dff7b
|
[
"Apache-2.0"
] | 19
|
2019-04-05T03:39:51.000Z
|
2021-11-09T10:55:58.000Z
|
deploy/test/kf_producer2.py
|
zhexiao/mnet
|
51e64e6c5181702f90d4f68efb5e4ba8f20dff7b
|
[
"Apache-2.0"
] | 3
|
2017-08-04T07:37:31.000Z
|
2021-06-10T19:42:58.000Z
|
deploy/test/kf_producer2.py
|
zhexiao/mnet
|
51e64e6c5181702f90d4f68efb5e4ba8f20dff7b
|
[
"Apache-2.0"
] | 11
|
2017-08-02T09:14:45.000Z
|
2021-05-07T15:33:07.000Z
|
from kafka import KafkaProducer
import time
import json
producer = KafkaProducer(bootstrap_servers='192.168.33.50:9092')
topic = 'test'
i = 0
while True:
i += 1
json_data = {
"msg": "my kafka {}".format(i),
"count": i
}
post_data = json.dumps(json_data).encode()
producer.send(topic, post_data)
print('producer - {0}'.format(post_data))
time.sleep(8)
| 19.9
| 64
| 0.633166
|
0518b415d8dc6e4130f1f5c985e345ec377de0e7
| 899
|
py
|
Python
|
GR/APP/migrations/0011_cartmodel.py
|
ymy838295768/GR
|
44de7d2f7bd540ae0af8d98f11d97f7783192a06
|
[
"MIT"
] | null | null | null |
GR/APP/migrations/0011_cartmodel.py
|
ymy838295768/GR
|
44de7d2f7bd540ae0af8d98f11d97f7783192a06
|
[
"MIT"
] | null | null | null |
GR/APP/migrations/0011_cartmodel.py
|
ymy838295768/GR
|
44de7d2f7bd540ae0af8d98f11d97f7783192a06
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-05-29 13:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('App', '0010_usermodel'),
]
operations = [
migrations.CreateModel(
name='CartModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('c_goods_num', models.IntegerField(default=1)),
('c_goods_select', models.BooleanField(default=True)),
('c_goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='App.Goods')),
('c_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='App.UserModel')),
],
),
]
| 33.296296
| 114
| 0.618465
|
4f392f5c981f9003c935bcfed83387728b125f08
| 7,229
|
py
|
Python
|
Tarea3a - EDPs/pez2.py
|
Nicolas-Francisco/Computer-Graphics
|
1895f3188c9ff662148a42c082a7191e2c83a06a
|
[
"CC-BY-4.0"
] | null | null | null |
Tarea3a - EDPs/pez2.py
|
Nicolas-Francisco/Computer-Graphics
|
1895f3188c9ff662148a42c082a7191e2c83a06a
|
[
"CC-BY-4.0"
] | null | null | null |
Tarea3a - EDPs/pez2.py
|
Nicolas-Francisco/Computer-Graphics
|
1895f3188c9ff662148a42c082a7191e2c83a06a
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[18]:
import glfw
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy as np
import sys
import math
import transformations as tr
import basic_shapes as bs
import scene_graph as sg
import easy_shaders as es
# A class to store the application control
class Controller:
def __init__(self):
self.fillPolygon = True
self.showAxis = True
# we will use the global controller as communication with the callback function
controller = Controller()
def on_key(window, key, scancode, action, mods):
if action != glfw.PRESS:
return
global controller
if key == glfw.KEY_SPACE:
controller.fillPolygon = not controller.fillPolygon
elif key == glfw.KEY_LEFT_CONTROL:
controller.showAxis = not controller.showAxis
elif key == glfw.KEY_ESCAPE:
sys.exit()
else:
print('Unknown key')
def createFish(color1, color2):
gpuBlackCube = es.toGPUShape(bs.createColorCube(0,0,0))
gpuCube1 = es.toGPUShape(bs.createColorCube(color1[0], color1[1], color1[2]))
gpuCube2 = es.toGPUShape(bs.createColorCube(color2[0], color2[1], color2[2]))
# Creating the fins
aleta1 = sg.SceneGraphNode("wheel")
aleta1.transform = tr.matmul([tr.translate(0.2, 0.4, 0), tr.scale(0.4, 0.15, 0.05), tr.rotationZ(-np.pi/6)])
aleta1.childs += [gpuCube2]
aleta2 = sg.SceneGraphNode("wheel")
aleta2.transform = tr.matmul([tr.translate(0.1, 0.4, 0), tr.scale(0.4, 0.15, 0.05), tr.rotationZ(np.pi/6)])
aleta2.childs += [gpuCube2]
aleta3 = sg.SceneGraphNode("wheel")
aleta3.transform = tr.matmul([tr.translate(0.2, -0.4, 0), tr.scale(0.4, 0.15, 0.05), tr.rotationZ(np.pi/6)])
aleta3.childs += [gpuCube2]
aleta4 = sg.SceneGraphNode("wheel")
aleta4.transform = tr.matmul([tr.translate(0.1, -0.4, 0), tr.scale(0.4, 0.15, 0.05), tr.rotationZ(-np.pi/6)])
aleta4.childs += [gpuCube2]
aleta5 = sg.SceneGraphNode("wheel")
aleta5.transform = tr.matmul([tr.translate(-1.15, 0, 0.1), tr.scale(0.15, 0.05, 0.4), tr.rotationY(np.pi/6)])
aleta5.childs += [gpuCube2]
aleta6 = sg.SceneGraphNode("wheel")
aleta6.transform = tr.matmul([tr.translate(-1.15, 0, -0.1), tr.scale(0.15, 0.05, 0.4), tr.rotationY(-np.pi/6)])
aleta6.childs += [gpuCube2]
# Creating the body of the fish from stacked cubes
Cube1 = sg.SceneGraphNode("chasis1")
Cube1.transform = tr.matmul([tr.translate(0, 0, 0), tr.scale(0.4, 0.1, 1)])
Cube1.childs += [gpuCube1]
Cube2 = sg.SceneGraphNode("chasis2")
Cube2.transform = tr.matmul([tr.translate(0.1, 0, 0), tr.scale(0.4, 0.1, 0.9)])
Cube2.childs += [gpuCube1]
Cube3 = sg.SceneGraphNode("chasis2")
Cube3.transform = tr.matmul([tr.translate(0.2, 0, 0), tr.scale(0.4, 0.1, 0.8)])
Cube3.childs += [gpuCube1]
Cube4 = sg.SceneGraphNode("chasis2")
Cube4.transform = tr.matmul([tr.translate(-0.2, 0, 0), tr.scale(0.4, 0.1, 0.8)])
Cube4.childs += [gpuCube1]
Cube5 = sg.SceneGraphNode("chasis2")
Cube5.transform = tr.matmul([tr.translate(-0.4, 0, 0), tr.scale(0.4, 0.1, 0.6)])
Cube5.childs += [gpuCube1]
Cube6 = sg.SceneGraphNode("chasis2")
Cube6.transform = tr.matmul([tr.translate(-0.6, 0, 0), tr.scale(0.4, 0.1, 0.4)])
Cube6.childs += [gpuCube1]
Cube7 = sg.SceneGraphNode("chasis2")
Cube7.transform = tr.matmul([tr.translate(0.3, 0, 0), tr.scale(0.4, 0.1, 0.7)])
Cube7.childs += [gpuCube1]
Cube8 = sg.SceneGraphNode("chasis2")
Cube8.transform = tr.matmul([tr.translate(-0.8, 0, 0), tr.scale(0.4, 0.1, 0.2)])
Cube8.childs += [gpuCube1]
Cube9 = sg.SceneGraphNode("chasis2")
Cube9.transform = tr.matmul([tr.translate(0.4, 0, 0), tr.scale(0.4, 0.1, 0.4)])
Cube9.childs += [gpuCube1]
Cube10 = sg.SceneGraphNode("chasis2")
Cube10.transform = tr.matmul([tr.translate(0.35, 0, 0.29), tr.scale(0.1, 0.21, 0.1)])
Cube10.childs += [gpuBlackCube]
# All pieces together
car = sg.SceneGraphNode("chasis")
car.childs += [Cube1]
car.childs += [Cube2]
car.childs += [Cube3]
car.childs += [Cube4]
car.childs += [Cube5]
car.childs += [Cube6]
car.childs += [Cube7]
car.childs += [Cube8]
car.childs += [Cube9]
car.childs += [Cube10]
car.childs += [aleta1]
car.childs += [aleta2]
car.childs += [aleta3]
car.childs += [aleta4]
car.childs += [aleta5]
car.childs += [aleta6]
return car
if __name__ == "__main__":
# Initialize glfw
if not glfw.init():
sys.exit()
width = 600
height = 600
window = glfw.create_window(width, height, "3D fish", None, None)
if not window:
glfw.terminate()
sys.exit()
glfw.make_context_current(window)
# Connecting the callback function 'on_key' to handle keyboard events
glfw.set_key_callback(window, on_key)
# Assembling the shader program (pipeline) with both shaders
mvpPipeline = es.SimpleModelViewProjectionShaderProgram()
# Telling OpenGL to use our shader program
glUseProgram(mvpPipeline.shaderProgram)
# Setting up the clear screen color
glClearColor(0.85, 0.85, 0.85, 1.0)
# As we work in 3D, we need to check which part is in front,
# and which one is at the back
glEnable(GL_DEPTH_TEST)
# Creating shapes on GPU memory
gpuAxis = es.toGPUShape(bs.createAxis(7))
redFishNode = createFish([1,0,0], [1,0.5,0.5])
# Using the same view and projection matrices in the whole application
projection = tr.perspective(45, float(width)/float(height), 0.1, 100)
glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, "projection"), 1, GL_TRUE, projection)
view = tr.lookAt(
np.array([5,5,7]),
np.array([0,0,0]),
np.array([0,0,1])
)
glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, "view"), 1, GL_TRUE, view)
while not glfw.window_should_close(window):
# Using GLFW to check for input events
glfw.poll_events()
# Clearing the screen in both, color and depth
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Filling or not the shapes depending on the controller state
if (controller.fillPolygon):
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
else:
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
if controller.showAxis:
glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, "model"), 1, GL_TRUE, tr.identity())
mvpPipeline.drawShape(gpuAxis, GL_LINES)
# Rotating the fish around the z-axis
# Uncomment to print the fish position on every iteration
# print(sg.findPosition(redFishNode, "chasis"))
redFishNode.transform = np.matmul(tr.rotationZ(1 * glfw.get_time()), tr.translate(0,0,0))
# Drawing the fish
sg.drawSceneGraphNode(redFishNode, mvpPipeline, "model")
# Once the render is done, buffers are swapped, showing only the complete scene.
glfw.swap_buffers(window)
glfw.terminate()
# In[ ]:
# In[ ]:
| 29.871901
| 115
| 0.637709
|
0e045177669c913d8d585c53be1bd79c945e46a3
| 447
|
py
|
Python
|
pyfenstein3d/engine/__init__.py
|
GrrriiiM/pyfenstein3d
|
0ac7655897f91edfe13554449e9c1f1e6b1ba504
|
[
"MIT"
] | 7
|
2020-12-17T19:39:15.000Z
|
2021-09-06T01:03:57.000Z
|
pyfenstein3d/engine/__init__.py
|
GrrriiiM/pyfenstein3d
|
0ac7655897f91edfe13554449e9c1f1e6b1ba504
|
[
"MIT"
] | null | null | null |
pyfenstein3d/engine/__init__.py
|
GrrriiiM/pyfenstein3d
|
0ac7655897f91edfe13554449e9c1f1e6b1ba504
|
[
"MIT"
] | null | null | null |
from .vector2d import Vector2d
from .item import Item
from .wall import Wall
from .decoration import Decoration
from .person import Person
from .field_of_view import FieldOfView
from .map2d import Map2d
from . import config
from .item_grid import ItemGrid
from .ray import Ray
from .player import Player
from .server import Server
from .animation import Animation
from .weapon import Weapon
from .weapon import WeaponPistol
from .door import Door
| 26.294118
| 38
| 0.821029
|
c06c78cbc5b99e8a8147fe51e3c3586cf031dba9
| 9,628
|
py
|
Python
|
pycircuit/post/functions.py
|
henjo/pycircu
|
20eb79fd05bd7903f7033b41ad8a1b106e99d9e3
|
[
"BSD-3-Clause"
] | 25
|
2015-05-13T22:49:26.000Z
|
2020-03-10T04:13:20.000Z
|
pycircuit/post/functions.py
|
henjo/pycircu
|
20eb79fd05bd7903f7033b41ad8a1b106e99d9e3
|
[
"BSD-3-Clause"
] | 1
|
2016-11-09T13:09:31.000Z
|
2016-11-09T13:09:31.000Z
|
pycircuit/post/functions.py
|
henjo/pycircu
|
20eb79fd05bd7903f7033b41ad8a1b106e99d9e3
|
[
"BSD-3-Clause"
] | 9
|
2016-03-05T11:46:27.000Z
|
2022-01-19T18:30:55.000Z
|
# -*- coding: latin-1 -*-
# Copyright (c) 2008 Pycircuit Development Team
# See LICENSE for details.
"""This module contains functions that operates on wave objects or scalars"""
from waveform import Waveform, reducedim, applyfunc, applyfunc_and_reducedim,\
iswave, wavefunc, assert_waveform
import numpy as np
from numpy import array, pi, sign, alltrue, where, arange, vstack, \
sin, log10, sqrt, nan
import scipy as sp
import scipy.optimize as optimize
import pylab as pl  # used by compression_plot below
def db10(w):
"""Return x in dB where x is assumed to be a non-power quantity
>>> w1=Waveform(array([1,2,3]),array([complex(-1,0),complex(0,1),2]))
>>> db10(w1)
Waveform(array([1, 2, 3]), array([ 0. , 0. , 3.01029996]))
"""
return applyfunc(lambda x: 10.0*log10(abs(x)), w, 'db10')
def db20(w):
"""Return x in dB where x is assumed to be a non-power quantity
>>> w1=Waveform(array([1,2,3]),array([complex(-1,0),complex(0,1), \
complex(1,-1)]), ylabel='x')
>>> db20(w1)
Waveform(array([1, 2, 3]), array([ 0. , 0. , 3.01029996]))
>>> db20(w1).ylabel
'db20(x)'
"""
return applyfunc(lambda x: 20.0*log10(abs(x)), w, 'db20')
@wavefunc
def ymax(w, axis=-1):
return w
@wavefunc
def ymin(w, axis=-1):
return w
@wavefunc
def value(w, x):
return w
@wavefunc
def imag(w):
return np.imag(w)
@wavefunc
def real(w):
return np.real(w)
raising = 1
falling = 2
either = 3
def cross(w, crossval = 0.0, n=0, crosstype=either, axis=-1):
"""Calculates the x-axis value where a particular crossing with the
specified edge type occurs
Examples:
1-d waveform
>>> phi = arange(0, 4*pi, pi/10)-pi/4
>>> y = Waveform(phi, sin(phi))
>>> cross(y)
0.0
>>> cross(y, crosstype=falling)
3.1415926535897931
2-d waveform
>>> x1 = [pi/4,pi/2]
>>> x2 = arange(0, 4*pi, pi/10)-pi/4
>>> phi = vstack([x2 for p in x1])
>>> y = Waveform([x1,x2], sin(phi))
>>> cross(y)
Waveform(array([ 0.78539816, 1.57079633]), array([ 0., 0.]))
No crossing
>>> cross(Waveform([[0,1,2,3]], array([1,1,1,1])))
nan
.. todo:: handle case where x-values are exactly at the crossing
"""
x = w.get_x(axis)
def findedge(y):
## Find edges
if crosstype == either:
edges = sign(y[:-1]) != sign(y[1:])
elif crosstype == raising:
edges = sign(y[:-1]) < sign(y[1:])
elif crosstype == falling:
edges = sign(y[:-1]) > sign(y[1:])
if alltrue(edges == False):
return nan
iedge = where(edges)[0][n]
## Find exact location of the crossing using interpolated x-values
finterp = sp.interpolate.interp1d(x, y)
return optimize.zeros.brenth(finterp, x[iedge], x[iedge+1])
return applyfunc_and_reducedim(findedge, w - crossval, yunit = w.xunits[0],
ylabel = w.xlabels[-1], axis=axis)
def phase(w):
"""Return argument in degrees of complex values
Example:
>>> phase(1)
0.0
>>> phase(complex(0,1))
90.0
>>> phase(Waveform((range(3),), array([1, complex(1,1), complex(0,-1)])))
Waveform(array([0, 1, 2]), array([ 0., 45., -90.]))
"""
def phase(x):
return np.angle(x, deg=True)
return applyfunc(phase, w)
def phase_margin(g):
"""Calculate phase margin of a loop gain vs frequency waveform
>>> w = 2 * pi * np.logspace(3,8,41)
>>> w1 = -1e6
>>> H = Waveform(w, 1.5 * (1 / (1 - 1j*w / w1))**2)
>>> '%0.4g'%phase_margin(H)
'110.4'
"""
f0 = cross(abs(g), 1.0)
return phase(-g.value(f0))
def bandwidth(w, db = 3.0, type = 'low'):
"""Calculate bandwidth of transfer as function of frequency
Example:
>>> w = 2 * pi * np.logspace(3,8)
>>> w1 = -1e6
>>> H = Waveform(w, 1 / (1 - 1j*w / w1))
>>> bandwidth(H)
1000896.9666087811
"""
xmin = min(w._xlist[-1])
w0 = abs(w.value(xmin))
return cross(abs(w), w0*10**(-db/20.0))
def unityGainFrequency(g):
"""Calculate the frequency where the gain is unity
"""
return cross(abs(g), 1.0)
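# Illustrative use (the transfer function is assumed): for g = Waveform(w, A0 / (1 + 1j*w/w1))
# with A0 >> 1, unityGainFrequency(g) is approximately A0 * w1.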
def IM3(w, fund1, fund2, fund0=None):
"""Return input referred third order intermodulation tone
The intermodulation product is evaluated at fund1 + 2 * fund2
"""
return value(abs(w), fund1 + 2 * fund2)
def IM2(w, fund1, fund2, fund0=None):
"""Return input referred third order intermodulation tone
The intermodulation product is evaluated at fund1 + fund2
"""
return value(abs(w), fund1 + fund2)
def IIP3(output, input, fund1, fund2, fund0=None):
"""Calculate input referred third order intermodulation intercept point
The intermodulation product is evaluated at fund1 + 2 * fund2
"""
s = abs(output)
if fund0 is None:
gain = value(s/abs(input), fund1)
else:
gain = value(s, abs(fund1)) / value(abs(input), abs(abs(fund1)+fund0))
return sqrt(s.value(abs(fund1)) * value(s,abs(fund2))**2 /
value(s, fund1 + 2 * fund2)) / gain
def IIP2(output, input, fund1, fund2, fund0=None):
"""Calculate input referred second order intermodulation intercept point
The intermodulation product is evaluated at fund1 + fund2
"""
s = abs(output)
if fund0 is None:
gain = value(s/abs(input), fund1)
else:
gain = value(s, abs(fund1)) / value(abs(input), abs(abs(fund1)+fund0))
return value(s, abs(fund1)) * value(s, abs(fund2)) \
/ value(s, fund1 + fund2) / gain
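# Consistency note (not used by the code): with two equal-amplitude tones these expressions
# reduce to the familiar dB forms IIP3 = (3*P_fund - P_IM3)/2 - G and IIP2 = 2*P_fund - P_IM2 - G,
# where G is the gain in dB.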
def clip(w, xfrom, xto=None):
if isinstance(w, Waveform):
return w.clip(xfrom, xto)
else:
return w
def average(w, axis=-1):
"""Calculate average
Example:
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]))
>>> average(w1)
Waveform(array([0, 1]), array([ 2. , 2.5]))
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]), \
xlabels=['row','col'])
>>> average(w1, axis='row')
Waveform(array([0, 1]), array([ 0.5, 4. ]))
"""
return reducedim(w, np.mean(w._y, axis=w.getaxis(axis)),
axis=w.getaxis(axis))
def rms(w, axis=-1):
"""Calculate root-mean-square"""
return reducedim(w, sqrt(np.mean(w._y**2, axis=w.getaxis(axis))),
axis=w.getaxis(axis))
def stddev(w, axis=-1):
"""Calculate the standard deviation
Returns the standard deviation over the highest dimension, a measure of the
spread of a distribution.
Example:
>>> w1=Waveform([range(2), range(4)], array([[1,2,3,4],[1,1,1,1]]))
>>> stddev(w1)
Waveform(array([0, 1]), array([ 1.11803399, 0. ]))
"""
return reducedim(w, np.std(w._y, axis=w.getaxis(axis)),
axis=w.getaxis(axis))
def deriv(w):
"""Calculate derivative of a waveform with respect to the inner x-axis"""
assert_waveform(w)
return w.deriv()
def dft(w):
"""Calculates the discrete Fourier transform of the input waveform"""
def calc_extrapolation_line(w_db, slope, extrapolation_point=None,
axis = -1, plot = False, plotargs = {}):
"""Return linear extrapolation line and optionally plot it"""
if extrapolation_point is None:
extrapolation_point = w_db.xval(axis=axis).ymin(axis=axis)
m = w_db.value(extrapolation_point, axis=axis) - slope * extrapolation_point
print m.xlabels, w_db.xlabels
interpol_line = m + slope * w_db.xval(axis=axis)
if plot:
return interpol_line, interpol_line.plot(**plotargs)
else:
return interpol_line
def compression_point(w_db, slope = 1, compression = 1,
extrapolation_point = None, axis = -1):
"""Return input referred compression point"""
interpol_line = calc_extrapolation_line(w_db, slope, extrapolation_point,
axis)
return cross(interpol_line - w_db, compression)
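# The idea, stated for reference: the extrapolation line is y = m + slope*x (both axes in dB),
# with m chosen so that the line passes through the curve at `extrapolation_point`; the
# compression point is the x value where the curve has fallen `compression` dB below that line.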
def compression_plot(w_db, extrapolation_point=None,
compression = 1, axis = -1):
"""Plot of compression point of a quantity in dB
Both x and y axis should be in dB units
"""
## Plot curve
w_db.plot(axis=axis)
## Plot extrapolation line
pl.hold(True)
calc_extrapolation_line(w_db, 1,
extrapolation_point=extrapolation_point,
axis=axis,
plot = True,
plotargs={'linestyle':'dashed',
'color': 'black'})
## Calculate intercept point
w_x_cp = compression_point(w_db)
w_y_cp = w_db.value(w_x_cp, axis=axis)
## Draw measurement arrows
for x_cp, y_cp in iterate_over_values(w_x_cp, w_y_cp):
ax = pl.gca()
x_offset = 0
y_offset = 0.25 * (ax.get_xbound()[1] - ax.get_xbound()[0])
ax.annotate("1 dB compression\n@ (%0.2g, %0.2g)"%(x_cp,y_cp),
xy = (x_cp, y_cp), xycoords='data',
xytext = (x_cp + x_offset, y_cp + y_offset),
textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
pl.grid()
## Plot curve again to get labels right
w_db.plot(axis=axis)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28.826347
| 80
| 0.565019
|
6fe4c623c09651bbad95b6db524df7aa4b11540f
| 2,278
|
py
|
Python
|
mlf_core/create/templates/package/package_prediction/{{ cookiecutter.project_slug_no_hyphen }}/{{cookiecutter.project_slug_no_hyphen}}/cli_pytorch.py
|
mlf-core/mlf-core
|
016f6186b5b62622c3a2b3ca884331fe0165b97c
|
[
"Apache-2.0"
] | 31
|
2020-10-04T14:54:54.000Z
|
2021-11-22T09:33:17.000Z
|
mlf_core/create/templates/package/package_prediction/{{ cookiecutter.project_slug_no_hyphen }}/{{cookiecutter.project_slug_no_hyphen}}/cli_pytorch.py
|
mlf-core/mlf_core
|
cea155595df95d1d22473605d29813f5d698d635
|
[
"Apache-2.0"
] | 200
|
2020-08-05T13:51:14.000Z
|
2022-03-28T00:25:54.000Z
|
mlf_core/create/templates/package/package_prediction/{{ cookiecutter.project_slug_no_hyphen }}/{{cookiecutter.project_slug_no_hyphen}}/cli_pytorch.py
|
mlf-core/mlf_core
|
cea155595df95d1d22473605d29813f5d698d635
|
[
"Apache-2.0"
] | 3
|
2020-11-29T17:03:52.000Z
|
2021-06-03T13:12:03.000Z
|
import os
import sys
import click
import numpy as np
import torch
from rich import print, traceback
WD = os.path.dirname(__file__)
@click.command()
@click.option('-i', '--input', required=True, type=str, help='Path to data file to predict.')
@click.option('-m', '--model', type=str, help='Path to an already trained PyTorch model. If not passed a default model will be loaded.')
@click.option('-c/-nc', '--cuda/--no-cuda', type=bool, default=False, help='Whether to enable cuda or not')
@click.option('-o', '--output', type=str, help='Path to write the output to')
def main(input: str, model: str, cuda: bool, output: str):
"""Command-line interface for {{ cookiecutter.project_name }}"""
print(r"""[bold blue]
{{ cookiecutter.project_name }}
""")
print('[bold blue]Run [green]{{ cookiecutter.project_name }} --help [blue]for an overview of all commands\n')
if not model:
model = get_pytorch_model(f'{WD}/models/pytorch_test_model')
else:
model = get_pytorch_model(model)
if cuda:
model.cuda()
print('[bold blue] Parsing data')
data_to_predict = read_data_to_predict(input)
print('[bold blue] Performing predictions')
predictions = np.round(model.predict(data_to_predict))
print(predictions)
if output:
print(f'[bold blue]Writing predictions to {output}')
write_results(predictions, output)
def read_data_to_predict(path_to_data_to_predict: str):
"""
Parses the data to predict and returns a dataset the model can consume
:param path_to_data_to_predict: Path to the data on which predictions should be performed on
"""
return
def write_results(predictions: np.ndarray, path_to_write_to) -> None:
"""
Writes the predictions into a human readable file.
:param predictions: Predictions as a numpy array
:param path_to_write_to: Path to write the predictions to
"""
pass
def get_pytorch_model(path_to_pytorch_model: str):
"""
Fetches the model of choice and loads it with torch.load.
:param path_to_pytorch_model: Path to the PyTorch model
"""
model = torch.load(path_to_pytorch_model)
return model
if __name__ == "__main__":
traceback.install()
sys.exit(main()) # pragma: no cover
| 32.542857
| 136
| 0.691396
|
bf1fa60d1f100490e409759b6e69fc7789d904d0
| 558
|
py
|
Python
|
hyperverlet/factories/model_factory.py
|
Zinoex/hyperverlet
|
431ef92fa2448ce69c357f01c0862353067bfa8a
|
[
"MIT"
] | 7
|
2021-08-02T09:10:35.000Z
|
2022-03-16T13:24:22.000Z
|
hyperverlet/factories/model_factory.py
|
Zinoex/hyperverlet
|
431ef92fa2448ce69c357f01c0862353067bfa8a
|
[
"MIT"
] | 2
|
2021-06-15T11:50:59.000Z
|
2021-06-16T12:23:51.000Z
|
hyperverlet/factories/model_factory.py
|
Zinoex/hyperverlet
|
431ef92fa2448ce69c357f01c0862353067bfa8a
|
[
"MIT"
] | null | null | null |
from hyperverlet.models.pendulum import PendulumModel, SymplecticPendulumModel
from hyperverlet.models.spring_mass import SpringMassModel, SymplecticSpringMassModel
def construct_model(module_config):
module = module_config["module"]
module_mapping = dict(
PendulumModel=PendulumModel,
SymplecticPendulumModel=SymplecticPendulumModel,
SpringMassModel=SpringMassModel,
SymplecticSpringMassModel=SymplecticSpringMassModel
)
module = module_mapping[module]
model = module(module_config)
return model
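# Illustrative call (the config keys beyond "module" depend on the concrete model and are
# assumptions here):
#   construct_model({"module": "PendulumModel", ...})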
| 29.368421
| 85
| 0.781362
|
fd2304c7aead291b4b7482703787c3ee0a6d951e
| 900
|
py
|
Python
|
setup.py
|
tibidi/pychromecast
|
d7acb9f5ae2c0daa797d78da1a1e8090b4181d21
|
[
"MIT"
] | null | null | null |
setup.py
|
tibidi/pychromecast
|
d7acb9f5ae2c0daa797d78da1a1e8090b4181d21
|
[
"MIT"
] | null | null | null |
setup.py
|
tibidi/pychromecast
|
d7acb9f5ae2c0daa797d78da1a1e8090b4181d21
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
long_description = open("README.rst").read()
setup(
name="PyChromecast",
version="10.2.3",
license="MIT",
url="https://github.com/balloob/pychromecast",
author="Paulus Schoutsen",
author_email="paulus@paulusschoutsen.nl",
description="Python module to talk to Google Chromecast.",
long_description=long_description,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms="any",
install_requires=list(val.strip() for val in open("requirements.txt")),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 31.034483
| 75
| 0.665556
|
47bf252fc9f46698b19be12c71c1e7eac989af02
| 17,441
|
py
|
Python
|
data/model/oci/manifest.py
|
cuisongliu/quay
|
49862a9edae967b64f40cfbde7ef849f4619b26b
|
[
"Apache-2.0"
] | 1
|
2020-10-16T19:30:41.000Z
|
2020-10-16T19:30:41.000Z
|
data/model/oci/manifest.py
|
cuisongliu/quay
|
49862a9edae967b64f40cfbde7ef849f4619b26b
|
[
"Apache-2.0"
] | 15
|
2020-06-18T15:32:06.000Z
|
2022-03-03T23:06:24.000Z
|
data/model/oci/manifest.py
|
room9ho/sac1
|
79ca6f28cf37fce11b21801df947e5dc3d0acbd7
|
[
"Apache-2.0"
] | null | null | null |
import logging
from collections import namedtuple
from peewee import IntegrityError
from data.database import (
Tag,
Manifest,
ManifestBlob,
ManifestLegacyImage,
ManifestChild,
db_transaction,
)
from data.model import BlobDoesNotExist
from data.model.blob import get_or_create_shared_blob, get_shared_blob
from data.model.oci.tag import filter_to_alive_tags, create_temporary_tag_if_necessary
from data.model.oci.label import create_manifest_label
from data.model.oci.retriever import RepositoryContentRetriever
from data.model.storage import lookup_repo_storages_by_content_checksum
from data.model.image import lookup_repository_images, get_image, synthesize_v1_image
from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES
from image.docker.schema1 import ManifestException
from image.docker.schema2.list import MalformedSchema2ManifestList
from util.validation import is_json
TEMP_TAG_EXPIRATION_SEC = 300 # 5 minutes
logger = logging.getLogger(__name__)
CreatedManifest = namedtuple("CreatedManifest", ["manifest", "newly_created", "labels_to_apply"])
class CreateManifestException(Exception):
"""
Exception raised when creating a manifest fails and explicit exception raising is requested.
"""
def lookup_manifest(
repository_id,
manifest_digest,
allow_dead=False,
require_available=False,
temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
):
"""
Returns the manifest with the specified digest under the specified repository or None if none.
If allow_dead is True, then manifests referenced by only dead tags will also be returned. If
require_available is True, the manifest will be marked with a temporary tag to ensure it remains
available.
"""
if not require_available:
return _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
with db_transaction():
found = _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
if found is None:
return None
create_temporary_tag_if_necessary(found, temp_tag_expiration_sec)
return found
def _lookup_manifest(repository_id, manifest_digest, allow_dead=False):
query = (
Manifest.select()
.where(Manifest.repository == repository_id)
.where(Manifest.digest == manifest_digest)
)
if allow_dead:
try:
return query.get()
except Manifest.DoesNotExist:
return None
# First, try to filter to those manifests referenced by an alive tag.
try:
return filter_to_alive_tags(query.join(Tag)).get()
except Manifest.DoesNotExist:
pass
# Try referenced as the child of a manifest that has an alive tag.
query = query.join(ManifestChild, on=(ManifestChild.child_manifest == Manifest.id)).join(
Tag, on=(Tag.manifest == ManifestChild.manifest)
)
query = filter_to_alive_tags(query)
try:
return query.get()
except Manifest.DoesNotExist:
return None
def get_or_create_manifest(
repository_id,
manifest_interface_instance,
storage,
temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
for_tagging=False,
raise_on_error=False,
retriever=None,
):
"""
Returns a CreatedManifest for the manifest in the specified repository with the matching digest
(if it already exists) or, if not yet created, creates and returns the manifest.
Returns None if there was an error creating the manifest, unless raise_on_error is specified,
in which case a CreateManifestException exception will be raised instead to provide more
context to the error.
Note that *all* blobs referenced by the manifest must exist already in the repository or this
method will fail with a None.
"""
existing = lookup_manifest(
repository_id,
manifest_interface_instance.digest,
allow_dead=True,
require_available=True,
temp_tag_expiration_sec=temp_tag_expiration_sec,
)
if existing is not None:
return CreatedManifest(manifest=existing, newly_created=False, labels_to_apply=None)
return _create_manifest(
repository_id,
manifest_interface_instance,
storage,
temp_tag_expiration_sec,
for_tagging=for_tagging,
raise_on_error=raise_on_error,
retriever=retriever,
)
def _create_manifest(
repository_id,
manifest_interface_instance,
storage,
temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
for_tagging=False,
raise_on_error=False,
retriever=None,
):
# Validate the manifest.
retriever = retriever or RepositoryContentRetriever.for_repository(repository_id, storage)
try:
manifest_interface_instance.validate(retriever)
except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
logger.exception("Could not validate manifest `%s`", manifest_interface_instance.digest)
if raise_on_error:
raise CreateManifestException(str(ex))
return None
# Load, parse and get/create the child manifests, if any.
child_manifest_refs = manifest_interface_instance.child_manifests(retriever)
child_manifest_rows = {}
child_manifest_label_dicts = []
if child_manifest_refs is not None:
for child_manifest_ref in child_manifest_refs:
# Load and parse the child manifest.
try:
child_manifest = child_manifest_ref.manifest_obj
except (
ManifestException,
MalformedSchema2ManifestList,
BlobDoesNotExist,
IOError,
) as ex:
logger.exception(
"Could not load manifest list for manifest `%s`",
manifest_interface_instance.digest,
)
if raise_on_error:
raise CreateManifestException(str(ex))
return None
# Retrieve its labels.
labels = child_manifest.get_manifest_labels(retriever)
if labels is None:
if raise_on_error:
raise CreateManifestException("Unable to retrieve manifest labels")
logger.exception("Could not load manifest labels for child manifest")
return None
# Get/create the child manifest in the database.
child_manifest_info = get_or_create_manifest(
repository_id, child_manifest, storage, raise_on_error=raise_on_error
)
if child_manifest_info is None:
if raise_on_error:
raise CreateManifestException("Unable to retrieve child manifest")
logger.error("Could not get/create child manifest")
return None
child_manifest_rows[child_manifest_info.manifest.digest] = child_manifest_info.manifest
child_manifest_label_dicts.append(labels)
# Ensure all the blobs in the manifest exist.
digests = set(manifest_interface_instance.local_blob_digests)
blob_map = {}
# If the special empty layer is required, simply load it directly. This is much faster
# than trying to load it on a per repository basis, and that is unnecessary anyway since
# this layer is predefined.
if EMPTY_LAYER_BLOB_DIGEST in digests:
digests.remove(EMPTY_LAYER_BLOB_DIGEST)
blob_map[EMPTY_LAYER_BLOB_DIGEST] = get_shared_blob(EMPTY_LAYER_BLOB_DIGEST)
if not blob_map[EMPTY_LAYER_BLOB_DIGEST]:
if raise_on_error:
raise CreateManifestException("Unable to retrieve specialized empty blob")
logger.warning("Could not find the special empty blob in storage")
return None
if digests:
query = lookup_repo_storages_by_content_checksum(repository_id, digests)
blob_map.update({s.content_checksum: s for s in query})
for digest_str in digests:
if digest_str not in blob_map:
logger.warning(
"Unknown blob `%s` under manifest `%s` for repository `%s`",
digest_str,
manifest_interface_instance.digest,
repository_id,
)
if raise_on_error:
raise CreateManifestException("Unknown blob `%s`" % digest_str)
return None
# Special check: If the empty layer blob is needed for this manifest, add it to the
# blob map. This is necessary because Docker decided to elide sending of this special
# empty layer in schema version 2, but we need to have it referenced for GC and schema version 1.
if EMPTY_LAYER_BLOB_DIGEST not in blob_map:
try:
requires_empty_layer = manifest_interface_instance.get_requires_empty_layer_blob(
retriever
)
except ManifestException as ex:
if raise_on_error:
raise CreateManifestException(str(ex))
return None
if requires_empty_layer is None:
if raise_on_error:
raise CreateManifestException("Could not load configuration blob")
return None
if requires_empty_layer:
shared_blob = get_or_create_shared_blob(
EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage
)
assert not shared_blob.uploading
assert shared_blob.content_checksum == EMPTY_LAYER_BLOB_DIGEST
blob_map[EMPTY_LAYER_BLOB_DIGEST] = shared_blob
# Determine and populate the legacy image if necessary. Manifest lists will not have a legacy
# image.
legacy_image = None
if manifest_interface_instance.has_legacy_image:
legacy_image_id = _populate_legacy_image(
repository_id, manifest_interface_instance, blob_map, retriever, raise_on_error
)
if legacy_image_id is None:
return None
legacy_image = get_image(repository_id, legacy_image_id)
if legacy_image is None:
return None
# Create the manifest and its blobs.
media_type = Manifest.media_type.get_id(manifest_interface_instance.media_type)
storage_ids = {storage.id for storage in blob_map.values()}
with db_transaction():
# Check for the manifest. This is necessary because Postgres doesn't handle IntegrityErrors
# well under transactions.
try:
manifest = Manifest.get(
repository=repository_id, digest=manifest_interface_instance.digest
)
return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
except Manifest.DoesNotExist:
pass
# Create the manifest.
try:
manifest = Manifest.create(
repository=repository_id,
digest=manifest_interface_instance.digest,
media_type=media_type,
manifest_bytes=manifest_interface_instance.bytes.as_encoded_str(),
)
except IntegrityError as ie:
try:
manifest = Manifest.get(
repository=repository_id, digest=manifest_interface_instance.digest
)
except Manifest.DoesNotExist:
logger.error("Got integrity error when trying to create manifest: %s", ie)
if raise_on_error:
raise CreateManifestException(
"Attempt to create an invalid manifest. Please report this issue."
)
return None
return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
# Insert the blobs.
blobs_to_insert = [
dict(manifest=manifest, repository=repository_id, blob=storage_id)
for storage_id in storage_ids
]
if blobs_to_insert:
ManifestBlob.insert_many(blobs_to_insert).execute()
# Set the legacy image (if applicable).
if legacy_image is not None:
ManifestLegacyImage.create(
repository=repository_id, image=legacy_image, manifest=manifest
)
# Insert the manifest child rows (if applicable).
if child_manifest_rows:
children_to_insert = [
dict(manifest=manifest, child_manifest=child_manifest, repository=repository_id)
for child_manifest in child_manifest_rows.values()
]
ManifestChild.insert_many(children_to_insert).execute()
# If this manifest is being created not for immediate tagging, add a temporary tag to the
# manifest to ensure it isn't being GCed. If the manifest *is* for tagging, then since we're
# creating a new one here, it cannot be GCed (since it isn't referenced by anything yet), so
# its safe to elide the temp tag operation. If we ever change GC code to collect *all* manifests
# in a repository for GC, then we will have to reevaluate this optimization at that time.
if not for_tagging:
create_temporary_tag_if_necessary(manifest, temp_tag_expiration_sec)
# Define the labels for the manifest (if any).
# TODO: Once the old data model is gone, turn this into a batch operation and make the label
# application to the manifest occur under the transaction.
labels = manifest_interface_instance.get_manifest_labels(retriever)
if labels:
for key, value in labels.iteritems():
# NOTE: There can technically be empty label keys via Dockerfile's. We ignore any
# such `labels`, as they don't really mean anything.
if not key:
continue
media_type = "application/json" if is_json(value) else "text/plain"
create_manifest_label(manifest, key, value, "manifest", media_type)
# Return the dictionary of labels to apply (i.e. those labels that cause an action to be taken
# on the manifest or its resulting tags). We only return those labels either defined on
# the manifest or shared amongst all the child manifests. We intersect amongst all child manifests
# to ensure that any action performed is defined in all manifests.
labels_to_apply = labels or {}
if child_manifest_label_dicts:
labels_to_apply = child_manifest_label_dicts[0].viewitems()
for child_manifest_label_dict in child_manifest_label_dicts[1:]:
# Intersect the key+values of the labels to ensure we get the exact same result
# for all the child manifests.
labels_to_apply = labels_to_apply & child_manifest_label_dict.viewitems()
labels_to_apply = dict(labels_to_apply)
return CreatedManifest(manifest=manifest, newly_created=True, labels_to_apply=labels_to_apply)
def _populate_legacy_image(
repository_id, manifest_interface_instance, blob_map, retriever, raise_on_error=False
):
# Lookup all the images and their parent images (if any) inside the manifest.
# This will let us know which v1 images we need to synthesize and which ones are invalid.
docker_image_ids = list(manifest_interface_instance.get_legacy_image_ids(retriever))
images_query = lookup_repository_images(repository_id, docker_image_ids)
image_storage_map = {i.docker_image_id: i.storage for i in images_query}
# Rewrite any v1 image IDs that do not match the checksum in the database.
try:
rewritten_images = manifest_interface_instance.generate_legacy_layers(
image_storage_map, retriever
)
rewritten_images = list(rewritten_images)
parent_image_map = {}
for rewritten_image in rewritten_images:
            if rewritten_image.image_id not in image_storage_map:
parent_image = None
if rewritten_image.parent_image_id:
parent_image = parent_image_map.get(rewritten_image.parent_image_id)
if parent_image is None:
parent_image = get_image(repository_id, rewritten_image.parent_image_id)
if parent_image is None:
if raise_on_error:
raise CreateManifestException(
"Missing referenced parent image %s"
% rewritten_image.parent_image_id
)
return None
storage_reference = blob_map[rewritten_image.content_checksum]
synthesized = synthesize_v1_image(
repository_id,
storage_reference.id,
storage_reference.image_size,
rewritten_image.image_id,
rewritten_image.created,
rewritten_image.comment,
rewritten_image.command,
rewritten_image.compat_json,
parent_image,
)
parent_image_map[rewritten_image.image_id] = synthesized
except ManifestException as me:
logger.exception("exception when rewriting v1 metadata")
if raise_on_error:
raise CreateManifestException(me)
return None
return rewritten_images[-1].image_id
| 39.638636
| 104
| 0.669228
|
15aee82868fdc81fa59a8bfeeb6fbd75ab04cd86
| 3,106
|
py
|
Python
|
sleekxmpp/plugins/xep_0313/mam.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | 3
|
2019-02-01T06:50:08.000Z
|
2020-03-24T00:45:31.000Z
|
sleekxmpp/plugins/xep_0313/mam.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | 1
|
2017-11-07T13:03:48.000Z
|
2017-11-07T13:03:48.000Z
|
sleekxmpp/plugins/xep_0313/mam.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
    See the file LICENSE for copying permission.
"""
import logging
import sleekxmpp
from sleekxmpp.stanza import Message, Iq
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.xmlstream.handler import Collector
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.xep_0313 import stanza
log = logging.getLogger(__name__)
class XEP_0313(BasePlugin):
"""
XEP-0313 Message Archive Management
"""
name = 'xep_0313'
description = 'XEP-0313: Message Archive Management'
dependencies = set(['xep_0030', 'xep_0050', 'xep_0059', 'xep_0297'])
stanza = stanza
def plugin_init(self):
register_stanza_plugin(Iq, stanza.MAM)
register_stanza_plugin(Iq, stanza.Preferences)
register_stanza_plugin(Message, stanza.Result)
register_stanza_plugin(stanza.MAM, self.xmpp['xep_0059'].stanza.Set)
def retrieve(self, jid=None, start=None, end=None, with_jid=None, ifrom=None,
block=True, timeout=None, callback=None, iterator=False):
iq = self.xmpp.Iq()
query_id = iq['id']
iq['to'] = jid
iq['from'] = ifrom
iq['type'] = 'get'
iq['mam']['queryid'] = query_id
iq['mam']['start'] = start
iq['mam']['end'] = end
iq['mam']['with'] = with_jid
collector = Collector(
'MAM_Results_%s' % query_id,
StanzaPath('message/mam_result@queryid=%s' % query_id))
self.xmpp.register_handler(collector)
if iterator:
return self.xmpp['xep_0059'].iterate(iq, 'mam', 'results')
elif not block and callback is not None:
def wrapped_cb(iq):
results = collector.stop()
if iq['type'] == 'result':
iq['mam']['results'] = results
callback(iq)
return iq.send(block=block, timeout=timeout, callback=wrapped_cb)
else:
try:
resp = iq.send(block=block, timeout=timeout, callback=callback)
resp['mam']['results'] = collector.stop()
return resp
except XMPPError as e:
collector.stop()
raise e
def set_preferences(self, jid=None, default=None, always=None, never=None,
ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = jid
iq['from'] = ifrom
iq['mam_prefs']['default'] = default
iq['mam_prefs']['always'] = always
iq['mam_prefs']['never'] = never
return iq.send(block=block, timeout=timeout, callback=callback)
def get_configuration_commands(self, jid, **kwargs):
return self.xmpp['xep_0030'].get_items(
jid=jid,
node='urn:xmpp:mam#configure',
**kwargs)
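# Hedged usage sketch (not part of the upstream plugin): with the plugin registered
# on a connected client, a blocking archive query for a single contact could look
# like the helper below. The JIDs and the timeout are made-up placeholder values.
def _example_mam_query(xmpp):
    iq = xmpp['xep_0313'].retrieve(jid='archive.example.com',
                                   with_jid='friend@example.com',
                                   block=True, timeout=10)
    # retrieve() attaches the collected result messages to the response stanza.
    return iq['mam']['results']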
| 33.397849
| 81
| 0.60367
|
7b3e22e43f2c88abb06e97634c5668ffc6b45e4c
| 51,712
|
py
|
Python
|
cinder/tests/test_pure.py
|
yanheven/cinder
|
89797971f30d547acbf715fea099c52d90966d1f
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/test_pure.py
|
yanheven/cinder
|
89797971f30d547acbf715fea099c52d90966d1f
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/test_pure.py
|
yanheven/cinder
|
89797971f30d547acbf715fea099c52d90966d1f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_concurrency import processutils
from oslo_utils import units
from cinder import exception
from cinder import test
def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2):
def _decorator(f):
return f
return _decorator
patch_retry = mock.patch('cinder.utils.retry', fake_retry)
patch_retry.start()
sys.modules['purestorage'] = mock.Mock()
from cinder.volume.drivers import pure
# Only mock utils.retry for cinder.volume.drivers.pure import
patch_retry.stop()
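# Hedged aside (not part of the upstream tests): the start()/stop() calls above are
# the general "neutralise a decorator before importing the module that applies it"
# trick. A self-contained miniature of the same shape; nothing here runs at import
# time and the no-op decorator is a made-up stand-in.
def _example_decorator_patch():
    def _noop_retry(*_args, **_kwargs):
        def _decorator(f):
            return f
        return _decorator
    patcher = mock.patch('cinder.utils.retry', _noop_retry)
    patcher.start()
    try:
        # Any module imported in this window sees the no-op decorator instead of
        # the real cinder.utils.retry, just like the pure driver import above.
        pass
    finally:
        patcher.stop()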
DRIVER_PATH = "cinder.volume.drivers.pure"
DRIVER_OBJ = DRIVER_PATH + ".PureISCSIDriver"
ARRAY_OBJ = DRIVER_PATH + ".FlashArray"
TARGET = "pure-target"
API_TOKEN = "12345678-abcd-1234-abcd-1234567890ab"
VOLUME_BACKEND_NAME = "Pure_iSCSI"
PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"]
ISCSI_IPS = ["10.0.0." + str(i + 1) for i in range(len(PORT_NAMES))]
HOSTNAME = "computenode1"
PURE_HOST_NAME = pure._generate_purity_host_name(HOSTNAME)
PURE_HOST = {"name": PURE_HOST_NAME,
"hgroup": None,
"iqn": [],
"wwn": [],
}
REST_VERSION = "1.2"
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
"consistencygroup_id": None
}
VOLUME_WITH_CGROUP = VOLUME.copy()
VOLUME_WITH_CGROUP['consistencygroup_id'] = \
"4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
SRC_VOL_ID = "dc7a294d-5964-4379-a15f-ce5554734efc"
SRC_VOL = {"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": 'fake_src',
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
"consistencygroup_id": None
}
SNAPSHOT_ID = "04fe2f9a-d0c4-4564-a30d-693cc3657b47"
SNAPSHOT = {"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": SRC_VOL_ID,
"volume_name": "volume-" + SRC_VOL_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"cgsnapshot_id": None
}
SNAPSHOT_WITH_CGROUP = SNAPSHOT.copy()
SNAPSHOT_WITH_CGROUP['cgsnapshot_id'] = \
"4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
INITIATOR_IQN = "iqn.1993-08.org.debian:01:222"
CONNECTOR = {"initiator": INITIATOR_IQN, "host": HOSTNAME}
TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc"
TARGET_PORT = "3260"
ISCSI_PORTS = [{"name": name,
"iqn": TARGET_IQN,
"portal": ip + ":" + TARGET_PORT,
"wwn": None,
} for name, ip in zip(PORT_NAMES, ISCSI_IPS)]
NON_ISCSI_PORT = {"name": "ct0.fc1",
"iqn": None,
"portal": None,
"wwn": "5001500150015081",
}
PORTS_WITH = ISCSI_PORTS + [NON_ISCSI_PORT]
PORTS_WITHOUT = [NON_ISCSI_PORT]
VOLUME_CONNECTIONS = [{"host": "h1", "name": VOLUME["name"] + "-cinder"},
{"host": "h2", "name": VOLUME["name"] + "-cinder"},
]
TOTAL_CAPACITY = 50.0
USED_SPACE = 32.1
PROVISIONED_CAPACITY = 70.0
DEFAULT_OVER_SUBSCRIPTION = 20
SPACE_INFO = {"capacity": TOTAL_CAPACITY * units.Gi,
"total": USED_SPACE * units.Gi
}
SPACE_INFO_EMPTY = {"capacity": TOTAL_CAPACITY * units.Gi,
"total": 0
}
CONNECTION_INFO = {"driver_volume_type": "iscsi",
"data": {"target_iqn": TARGET_IQN,
"target_portal": ISCSI_IPS[0] + ":" + TARGET_PORT,
"target_lun": 1,
"target_discovered": True,
"access_mode": "rw",
},
}
class FakePureStorageHTTPError(Exception):
def __init__(self, target=None, rest_version=None, code=None,
headers=None, text=None):
self.target = target
self.rest_version = rest_version
self.code = code
self.headers = headers
self.text = text
class PureISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(PureISCSIDriverTestCase, self).setUp()
self.mock_config = mock.Mock()
self.mock_config.san_ip = TARGET
self.mock_config.pure_api_token = API_TOKEN
self.mock_config.volume_backend_name = VOLUME_BACKEND_NAME
self.mock_config.use_chap_auth = False
self.driver = pure.PureISCSIDriver(configuration=self.mock_config)
self.array = mock.Mock()
self.driver._array = self.array
self.purestorage_module = pure.purestorage
self.purestorage_module.PureHTTPError = FakePureStorageHTTPError
@mock.patch(DRIVER_OBJ + "._choose_target_iscsi_port")
def test_do_setup(self, mock_choose_target_iscsi_port):
mock_choose_target_iscsi_port.return_value = ISCSI_PORTS[0]
self.purestorage_module.FlashArray.return_value = self.array
self.array.get_rest_version.return_value = \
self.driver.SUPPORTED_REST_API_VERSIONS[0]
self.driver.do_setup(None)
self.purestorage_module.FlashArray.assert_called_with(
TARGET,
api_token=API_TOKEN
)
self.assertEqual(self.array, self.driver._array)
self.assertEqual(
self.driver.SUPPORTED_REST_API_VERSIONS,
self.purestorage_module.FlashArray.supported_rest_versions
)
mock_choose_target_iscsi_port.assert_called_with()
self.assertEqual(ISCSI_PORTS[0], self.driver._iscsi_port)
self.assert_error_propagates(
[
self.purestorage_module.FlashArray,
mock_choose_target_iscsi_port
],
self.driver.do_setup, None
)
def assert_error_propagates(self, mocks, func, *args, **kwargs):
"""Assert that errors from mocks propagate to func.
Fail if exceptions raised by mocks are not seen when calling
func(*args, **kwargs). Ensure that we are really seeing exceptions
from the mocks by failing if just running func(*args, **kargs) raises
an exception itself.
"""
func(*args, **kwargs)
for mock_func in mocks:
mock_func.side_effect = exception.PureDriverException(
reason="reason")
self.assertRaises(exception.PureDriverException,
func, *args, **kwargs)
mock_func.side_effect = None
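    def _example_error_propagation_check(self):
        """Hedged, self-contained illustration of assert_error_propagates.
        Not part of the upstream suite; the names here are made up. It shows the
        same pattern: call once to prove the call path is sound on its own, then
        make a collaborator raise and check that the error actually surfaces.
        """
        collaborator = mock.Mock(return_value=42)
        def operation():
            return collaborator()
        self.assertEqual(42, operation())
        collaborator.side_effect = exception.PureDriverException(reason="boom")
        self.assertRaises(exception.PureDriverException, operation)
        collaborator.side_effect = None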
def test_generate_purity_host_name(self):
generate = pure._generate_purity_host_name
result = generate("really-long-string-thats-a-bit-too-long")
self.assertTrue(result.startswith("really-long-string-that-"))
self.assertTrue(result.endswith("-cinder"))
self.assertEqual(len(result), 63)
self.assertTrue(pure.GENERATED_NAME.match(result))
result = generate("!@#$%^-invalid&*")
self.assertTrue(result.startswith("invalid---"))
self.assertTrue(result.endswith("-cinder"))
self.assertEqual(len(result), 49)
self.assertTrue(pure.GENERATED_NAME.match(result))
def test_create_volume(self):
self.driver.create_volume(VOLUME)
self.array.create_volume.assert_called_with(
VOLUME["name"] + "-cinder", 2 * units.Gi)
self.assert_error_propagates([self.array.create_volume],
self.driver.create_volume, VOLUME)
@mock.patch(DRIVER_OBJ + "._add_volume_to_consistency_group",
autospec=True)
def test_create_volume_with_cgroup(self, mock_add_to_cgroup):
vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder"
self.driver.create_volume(VOLUME_WITH_CGROUP)
mock_add_to_cgroup\
.assert_called_with(self.driver,
VOLUME_WITH_CGROUP['consistencygroup_id'],
vol_name)
def test_create_volume_from_snapshot(self):
vol_name = VOLUME["name"] + "-cinder"
snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"]
# Branch where extend unneeded
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.array.copy_volume.assert_called_with(snap_name, vol_name)
self.assertFalse(self.array.extend_volume.called)
self.assert_error_propagates(
[self.array.copy_volume],
self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT)
self.assertFalse(self.array.extend_volume.called)
# Branch where extend needed
SNAPSHOT["volume_size"] = 1 # resize so smaller than VOLUME
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
expected = [mock.call.copy_volume(snap_name, vol_name),
mock.call.extend_volume(vol_name, 2 * units.Gi)]
self.array.assert_has_calls(expected)
self.assert_error_propagates(
[self.array.copy_volume, self.array.extend_volume],
self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT)
SNAPSHOT["volume_size"] = 2 # reset size
@mock.patch(DRIVER_OBJ + "._add_volume_to_consistency_group",
autospec=True)
@mock.patch(DRIVER_OBJ + "._extend_if_needed", autospec=True)
@mock.patch(DRIVER_PATH + "._get_pgroup_vol_snap_name", autospec=True)
def test_create_volume_from_cgsnapshot(self, mock_get_snap_name,
mock_extend_if_needed,
mock_add_to_cgroup):
vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder"
snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \
"e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075."\
+ vol_name
mock_get_snap_name.return_value = snap_name
self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP,
SNAPSHOT_WITH_CGROUP)
self.array.copy_volume.assert_called_with(snap_name, vol_name)
self.assertTrue(mock_get_snap_name.called)
self.assertTrue(mock_extend_if_needed.called)
self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP,
SNAPSHOT_WITH_CGROUP)
mock_add_to_cgroup\
.assert_called_with(self.driver,
VOLUME_WITH_CGROUP['consistencygroup_id'],
vol_name)
def test_create_cloned_volume(self):
vol_name = VOLUME["name"] + "-cinder"
src_name = SRC_VOL["name"] + "-cinder"
# Branch where extend unneeded
self.driver.create_cloned_volume(VOLUME, SRC_VOL)
self.array.copy_volume.assert_called_with(src_name, vol_name)
self.assertFalse(self.array.extend_volume.called)
self.assert_error_propagates(
[self.array.copy_volume],
self.driver.create_cloned_volume, VOLUME, SRC_VOL)
self.assertFalse(self.array.extend_volume.called)
# Branch where extend needed
SRC_VOL["size"] = 1 # resize so smaller than VOLUME
self.driver.create_cloned_volume(VOLUME, SRC_VOL)
expected = [mock.call.copy_volume(src_name, vol_name),
mock.call.extend_volume(vol_name, 2 * units.Gi)]
self.array.assert_has_calls(expected)
self.assert_error_propagates(
[self.array.copy_volume, self.array.extend_volume],
self.driver.create_cloned_volume, VOLUME, SRC_VOL)
SRC_VOL["size"] = 2 # reset size
@mock.patch(DRIVER_OBJ + "._add_volume_to_consistency_group",
autospec=True)
def test_create_cloned_volume_with_cgroup(self, mock_add_to_cgroup):
vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder"
self.driver.create_cloned_volume(VOLUME_WITH_CGROUP, SRC_VOL)
mock_add_to_cgroup\
.assert_called_with(self.driver,
VOLUME_WITH_CGROUP['consistencygroup_id'],
vol_name)
def test_delete_volume_already_deleted(self):
self.array.list_volume_private_connections.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Volume does not exist"
)
self.driver.delete_volume(VOLUME)
self.assertFalse(self.array.destroy_volume.called)
        # Test the case where array.destroy_volume raises an exception
        # because the volume has already been deleted
self.array.list_volume_private_connections.side_effect = None
self.array.list_volume_private_connections.return_value = {}
self.array.destroy_volume.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Volume does not exist"
)
self.driver.delete_volume(VOLUME)
self.assertTrue(self.array.destroy_volume.called)
def test_delete_volume(self):
vol_name = VOLUME["name"] + "-cinder"
self.array.list_volume_private_connections.return_value = {}
self.driver.delete_volume(VOLUME)
expected = [mock.call.destroy_volume(vol_name)]
self.array.assert_has_calls(expected)
self.array.destroy_volume.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="reason")
        self.driver.delete_volume(VOLUME)
self.array.destroy_volume.side_effect = None
self.assert_error_propagates([self.array.destroy_volume],
self.driver.delete_volume, VOLUME)
def test_delete_connected_volume(self):
vol_name = VOLUME["name"] + "-cinder"
host_name_a = "ha"
host_name_b = "hb"
self.array.list_volume_private_connections.return_value = [{
"host": host_name_a,
"lun": 7,
"name": vol_name,
"size": 3221225472
}, {
"host": host_name_b,
"lun": 2,
"name": vol_name,
"size": 3221225472
}]
self.driver.delete_volume(VOLUME)
expected = [mock.call.list_volume_private_connections(vol_name),
mock.call.disconnect_host(host_name_a, vol_name),
mock.call.disconnect_host(host_name_b, vol_name),
mock.call.destroy_volume(vol_name)]
self.array.assert_has_calls(expected)
def test_create_snapshot(self):
vol_name = SRC_VOL["name"] + "-cinder"
self.driver.create_snapshot(SNAPSHOT)
self.array.create_snapshot.assert_called_with(
vol_name,
suffix=SNAPSHOT["name"]
)
self.assert_error_propagates([self.array.create_snapshot],
self.driver.create_snapshot, SNAPSHOT)
def test_delete_snapshot(self):
snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"]
self.driver.delete_snapshot(SNAPSHOT)
expected = [mock.call.destroy_volume(snap_name)]
self.array.assert_has_calls(expected)
self.array.destroy_volume.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="reason")
self.driver.delete_snapshot(SNAPSHOT)
self.array.destroy_volume.side_effect = None
self.assert_error_propagates([self.array.destroy_volume],
self.driver.delete_snapshot, SNAPSHOT)
@mock.patch(DRIVER_OBJ + "._connect")
@mock.patch(DRIVER_OBJ + "._get_target_iscsi_port")
def test_initialize_connection(self, mock_get_iscsi_port, mock_connection):
mock_get_iscsi_port.return_value = ISCSI_PORTS[0]
mock_connection.return_value = {"vol": VOLUME["name"] + "-cinder",
"lun": 1,
}
result = CONNECTION_INFO
real_result = self.driver.initialize_connection(VOLUME, CONNECTOR)
self.assertDictMatch(result, real_result)
mock_get_iscsi_port.assert_called_with()
mock_connection.assert_called_with(VOLUME, CONNECTOR, None)
self.assert_error_propagates([mock_get_iscsi_port, mock_connection],
self.driver.initialize_connection,
VOLUME, CONNECTOR)
@mock.patch(DRIVER_OBJ + "._connect")
@mock.patch(DRIVER_OBJ + "._get_target_iscsi_port")
def test_initialize_connection_with_auth(self, mock_get_iscsi_port,
mock_connection):
auth_type = "CHAP"
chap_username = CONNECTOR["host"]
chap_password = "password"
mock_get_iscsi_port.return_value = ISCSI_PORTS[0]
initiator_update = [{"key": pure.CHAP_SECRET_KEY,
"value": chap_password}]
mock_connection.return_value = {
"vol": VOLUME["name"] + "-cinder",
"lun": 1,
"auth_username": chap_username,
"auth_password": chap_password,
}
result = CONNECTION_INFO.copy()
result["data"]["auth_method"] = auth_type
result["data"]["auth_username"] = chap_username
result["data"]["auth_password"] = chap_password
self.mock_config.use_chap_auth = True
# Branch where no credentials were generated
real_result = self.driver.initialize_connection(VOLUME,
CONNECTOR)
mock_connection.assert_called_with(VOLUME, CONNECTOR, None)
self.assertDictMatch(result, real_result)
# Branch where new credentials were generated
mock_connection.return_value["initiator_update"] = initiator_update
result["initiator_update"] = initiator_update
real_result = self.driver.initialize_connection(VOLUME,
CONNECTOR)
mock_connection.assert_called_with(VOLUME, CONNECTOR, None)
self.assertDictMatch(result, real_result)
self.assert_error_propagates([mock_get_iscsi_port, mock_connection],
self.driver.initialize_connection,
VOLUME, CONNECTOR)
@mock.patch(DRIVER_OBJ + "._choose_target_iscsi_port")
@mock.patch(DRIVER_OBJ + "._run_iscsiadm_bare")
def test_get_target_iscsi_port(self, mock_iscsiadm, mock_choose_port):
self.driver._iscsi_port = ISCSI_PORTS[1]
self.assertEqual(self.driver._get_target_iscsi_port(), ISCSI_PORTS[1])
mock_iscsiadm.assert_called_with(["-m", "discovery",
"-t", "sendtargets",
"-p", ISCSI_PORTS[1]["portal"]])
self.assertFalse(mock_choose_port.called)
mock_iscsiadm.side_effect = [processutils.ProcessExecutionError, None]
mock_choose_port.return_value = ISCSI_PORTS[2]
self.assertEqual(self.driver._get_target_iscsi_port(), ISCSI_PORTS[2])
mock_choose_port.assert_called_with()
mock_iscsiadm.side_effect = processutils.ProcessExecutionError
self.assert_error_propagates([mock_choose_port],
self.driver._get_target_iscsi_port)
@mock.patch(DRIVER_OBJ + "._run_iscsiadm_bare")
def test_choose_target_iscsi_port(self, mock_iscsiadm):
self.array.list_ports.return_value = PORTS_WITHOUT
self.assertRaises(exception.PureDriverException,
self.driver._choose_target_iscsi_port)
self.array.list_ports.return_value = PORTS_WITH
self.assertEqual(ISCSI_PORTS[0],
self.driver._choose_target_iscsi_port())
self.assert_error_propagates([mock_iscsiadm, self.array.list_ports],
self.driver._choose_target_iscsi_port)
@mock.patch(DRIVER_PATH + "._generate_chap_secret", autospec=True)
@mock.patch(DRIVER_OBJ + "._get_host", autospec=True)
@mock.patch(DRIVER_PATH + "._generate_purity_host_name", autospec=True)
def test_connect(self, mock_generate, mock_host, mock_gen_secret):
vol_name = VOLUME["name"] + "-cinder"
result = {"vol": vol_name, "lun": 1}
# Branch where host already exists
mock_host.return_value = PURE_HOST
self.array.connect_host.return_value = {"vol": vol_name, "lun": 1}
real_result = self.driver._connect(VOLUME, CONNECTOR, None)
self.assertEqual(result, real_result)
mock_host.assert_called_with(self.driver, CONNECTOR)
self.assertFalse(mock_generate.called)
self.assertFalse(self.array.create_host.called)
self.array.connect_host.assert_called_with(PURE_HOST_NAME, vol_name)
# Branch where new host is created
mock_host.return_value = None
mock_generate.return_value = PURE_HOST_NAME
real_result = self.driver._connect(VOLUME, CONNECTOR, None)
mock_host.assert_called_with(self.driver, CONNECTOR)
mock_generate.assert_called_with(HOSTNAME)
self.array.create_host.assert_called_with(PURE_HOST_NAME,
iqnlist=[INITIATOR_IQN])
self.assertEqual(result, real_result)
mock_generate.reset_mock()
self.array.reset_mock()
self.assert_error_propagates(
[mock_host, mock_generate, self.array.connect_host,
self.array.create_host],
self.driver._connect, VOLUME, CONNECTOR, None)
self.mock_config.use_chap_auth = True
chap_user = CONNECTOR["host"]
chap_password = "sOmEseCr3t"
# Branch where chap is used and credentials already exist
initiator_data = [{"key": pure.CHAP_SECRET_KEY,
"value": chap_password}]
self.driver._connect(VOLUME, CONNECTOR, initiator_data)
result["auth_username"] = chap_user
result["auth_password"] = chap_password
self.assertDictMatch(result, real_result)
self.array.set_host.assert_called_with(PURE_HOST_NAME,
host_user=chap_user,
host_password=chap_password)
# Branch where chap is used and credentials are generated
mock_gen_secret.return_value = chap_password
self.driver._connect(VOLUME, CONNECTOR, None)
result["auth_username"] = chap_user
result["auth_password"] = chap_password
result["initiator_update"] = {
"set_values": {
pure.CHAP_SECRET_KEY: chap_password
}
}
self.assertDictMatch(result, real_result)
self.array.set_host.assert_called_with(PURE_HOST_NAME,
host_user=chap_user,
host_password=chap_password)
@mock.patch(DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected(self, mock_host):
mock_host.return_value = PURE_HOST
expected = {"host": PURE_HOST_NAME, "lun": 1}
self.array.list_volume_private_connections.return_value = \
[expected, {"host": "extra", "lun": 2}]
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
actual = self.driver._connect(VOLUME, CONNECTOR, None)
self.assertEqual(expected, actual)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
@mock.patch(DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected_list_hosts_empty(self, mock_host):
mock_host.return_value = PURE_HOST
self.array.list_volume_private_connections.return_value = {}
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
self.assertRaises(exception.PureDriverException, self.driver._connect,
VOLUME, CONNECTOR, None)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
@mock.patch(DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected_list_hosts_exception(self, mock_host):
mock_host.return_value = PURE_HOST
self.array.list_volume_private_connections.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="")
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver._connect, VOLUME, CONNECTOR, None)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
def test_get_host(self):
good_host = PURE_HOST.copy()
good_host.update(iqn=["another-wrong-iqn", INITIATOR_IQN])
bad_host = {"name": "bad-host", "iqn": ["wrong-iqn"]}
self.array.list_hosts.return_value = [bad_host]
real_result = self.driver._get_host(CONNECTOR)
self.assertIs(real_result, None)
self.array.list_hosts.return_value.append(good_host)
real_result = self.driver._get_host(CONNECTOR)
self.assertEqual(real_result, good_host)
self.assert_error_propagates([self.array.list_hosts],
self.driver._get_host, CONNECTOR)
@mock.patch(DRIVER_OBJ + "._get_host", autospec=True)
def test_terminate_connection(self, mock_host):
vol_name = VOLUME["name"] + "-cinder"
mock_host.return_value = {"name": "some-host"}
# Branch with manually created host
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.array.disconnect_host.assert_called_with("some-host", vol_name)
self.assertFalse(self.array.list_host_connections.called)
self.assertFalse(self.array.delete_host.called)
# Branch with host added to host group
self.array.reset_mock()
self.array.list_host_connections.return_value = []
mock_host.return_value = PURE_HOST.copy()
mock_host.return_value.update(hgroup="some-group")
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.assertTrue(self.array.list_host_connections.called)
self.assertTrue(self.array.delete_host.called)
# Branch with host still having connected volumes
self.array.reset_mock()
self.array.list_host_connections.return_value = [
{"lun": 2, "name": PURE_HOST_NAME, "vol": "some-vol"}]
mock_host.return_value = PURE_HOST
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.assertFalse(self.array.delete_host.called)
# Branch where host gets deleted
self.array.reset_mock()
self.array.list_host_connections.return_value = []
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.array.delete_host.assert_called_with(PURE_HOST_NAME)
# Branch where connection is missing and the host is still deleted
self.array.reset_mock()
self.array.disconnect_host.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="reason")
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.array.delete_host.assert_called_with(PURE_HOST_NAME)
# Branch where an unexpected exception occurs
self.array.reset_mock()
self.array.disconnect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=500,
text="Some other error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.terminate_connection, VOLUME, CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.assertFalse(self.array.list_host_connections.called)
self.assertFalse(self.array.delete_host.called)
@mock.patch(DRIVER_OBJ + "._get_provisioned_space", autospec=True)
def test_get_volume_stats(self, mock_space):
mock_space.return_value = PROVISIONED_CAPACITY * units.Gi
self.assertEqual(self.driver.get_volume_stats(), {})
self.array.get.return_value = SPACE_INFO
result = {"volume_backend_name": VOLUME_BACKEND_NAME,
"vendor_name": "Pure Storage",
"driver_version": self.driver.VERSION,
"storage_protocol": "iSCSI",
"total_capacity_gb": TOTAL_CAPACITY,
"free_capacity_gb": TOTAL_CAPACITY - USED_SPACE,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": PROVISIONED_CAPACITY,
"max_over_subscription_ratio": (PROVISIONED_CAPACITY /
USED_SPACE)
}
real_result = self.driver.get_volume_stats(refresh=True)
self.assertDictMatch(result, real_result)
self.assertDictMatch(result, self.driver._stats)
@mock.patch(DRIVER_OBJ + "._get_provisioned_space", autospec=True)
def test_get_volume_stats_empty_array(self, mock_space):
mock_space.return_value = PROVISIONED_CAPACITY * units.Gi
self.assertEqual(self.driver.get_volume_stats(), {})
self.array.get.return_value = SPACE_INFO_EMPTY
result = {"volume_backend_name": VOLUME_BACKEND_NAME,
"vendor_name": "Pure Storage",
"driver_version": self.driver.VERSION,
"storage_protocol": "iSCSI",
"total_capacity_gb": TOTAL_CAPACITY,
"free_capacity_gb": TOTAL_CAPACITY,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": PROVISIONED_CAPACITY,
"max_over_subscription_ratio": DEFAULT_OVER_SUBSCRIPTION
}
real_result = self.driver.get_volume_stats(refresh=True)
self.assertDictMatch(result, real_result)
self.assertDictMatch(result, self.driver._stats)
@mock.patch(DRIVER_OBJ + "._get_provisioned_space", autospec=True)
def test_get_volume_stats_nothing_provisioned(self, mock_space):
mock_space.return_value = 0
self.assertEqual(self.driver.get_volume_stats(), {})
self.array.get.return_value = SPACE_INFO
result = {"volume_backend_name": VOLUME_BACKEND_NAME,
"vendor_name": "Pure Storage",
"driver_version": self.driver.VERSION,
"storage_protocol": "iSCSI",
"total_capacity_gb": TOTAL_CAPACITY,
"free_capacity_gb": TOTAL_CAPACITY - USED_SPACE,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": 0,
"max_over_subscription_ratio": DEFAULT_OVER_SUBSCRIPTION
}
real_result = self.driver.get_volume_stats(refresh=True)
self.assertDictMatch(result, real_result)
self.assertDictMatch(result, self.driver._stats)
def test_extend_volume(self):
vol_name = VOLUME["name"] + "-cinder"
self.driver.extend_volume(VOLUME, 3)
self.array.extend_volume.assert_called_with(vol_name, 3 * units.Gi)
self.assert_error_propagates([self.array.extend_volume],
self.driver.extend_volume, VOLUME, 3)
def test_get_pgroup_name_from_id(self):
id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
expected_name = "consisgroup-%s-cinder" % id
actual_name = pure._get_pgroup_name_from_id(id)
self.assertEqual(expected_name, actual_name)
def test_get_pgroup_snap_suffix(self):
cgsnap = mock.Mock()
cgsnap.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
expected_suffix = "cgsnapshot-%s-cinder" % cgsnap.id
actual_suffix = pure._get_pgroup_snap_suffix(cgsnap)
self.assertEqual(expected_suffix, actual_suffix)
def test_get_pgroup_snap_name(self):
cg_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
cgsnap_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_cgsnap = mock.Mock()
mock_cgsnap.consistencygroup_id = cg_id
mock_cgsnap.id = cgsnap_id
expected_name = "consisgroup-%(cg)s-cinder.cgsnapshot-%(snap)s-cinder"\
% {"cg": cg_id, "snap": cgsnap_id}
actual_name = pure._get_pgroup_snap_name(mock_cgsnap)
self.assertEqual(expected_name, actual_name)
def test_get_pgroup_vol_snap_name(self):
cg_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
cgsnap_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
volume_name = "volume-4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_snap = mock.Mock()
mock_snap.cgsnapshot = mock.Mock()
mock_snap.cgsnapshot.consistencygroup_id = cg_id
mock_snap.cgsnapshot.id = cgsnap_id
mock_snap.volume_name = volume_name
expected_name = "consisgroup-%(cg)s-cinder.cgsnapshot-%(snap)s-cinder"\
".%(vol)s-cinder" % {"cg": cg_id,
"snap": cgsnap_id,
"vol": volume_name}
actual_name = pure._get_pgroup_vol_snap_name(mock_snap)
self.assertEqual(expected_name, actual_name)
def test_create_consistencygroup(self):
mock_cgroup = mock.Mock()
mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
model_update = self.driver.create_consistencygroup(None, mock_cgroup)
expected_name = pure._get_pgroup_name_from_id(mock_cgroup.id)
self.array.create_pgroup.assert_called_with(expected_name)
self.assertEqual({'status': 'available'}, model_update)
self.assert_error_propagates(
[self.array.create_pgroup],
self.driver.create_consistencygroup, None, mock_cgroup)
@mock.patch(DRIVER_OBJ + ".create_volume_from_snapshot")
@mock.patch(DRIVER_OBJ + ".create_consistencygroup")
def test_create_consistencygroup_from_src(self, mock_create_cg,
mock_create_vol):
mock_context = mock.Mock()
mock_group = mock.Mock()
mock_cgsnapshot = mock.Mock()
mock_snapshots = [mock.Mock() for i in range(5)]
mock_volumes = [mock.Mock() for i in range(5)]
self.driver.create_consistencygroup_from_src(
mock_context,
mock_group,
mock_volumes,
cgsnapshot=mock_cgsnapshot,
snapshots=mock_snapshots
)
mock_create_cg.assert_called_with(mock_context, mock_group)
expected_calls = [mock.call(vol, snap)
for vol, snap in zip(mock_volumes, mock_snapshots)]
mock_create_vol.assert_has_calls(expected_calls,
any_order=True)
self.assert_error_propagates(
[mock_create_vol, mock_create_cg],
self.driver.create_consistencygroup_from_src,
mock_context,
mock_group,
mock_volumes,
cgsnapshot=mock_cgsnapshot,
snapshots=mock_snapshots
)
def test_create_consistencygroup_from_src_no_snap(self):
# Expect an error when no cgsnapshot or snapshots are provided
self.assertRaises(exception.InvalidInput,
self.driver.create_consistencygroup_from_src,
mock.Mock(), # context
mock.Mock(), # group
[mock.Mock()]) # volumes
@mock.patch(DRIVER_OBJ + ".delete_volume", autospec=True)
def test_delete_consistencygroup(self, mock_delete_volume):
mock_cgroup = mock.MagicMock()
mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
mock_cgroup['status'] = "deleted"
mock_context = mock.Mock()
self.driver.db = mock.Mock()
mock_volume = mock.MagicMock()
expected_volumes = [mock_volume]
self.driver.db.volume_get_all_by_group.return_value = expected_volumes
model_update, volumes = \
self.driver.delete_consistencygroup(mock_context, mock_cgroup)
expected_name = pure._get_pgroup_name_from_id(mock_cgroup.id)
self.array.destroy_pgroup.assert_called_with(expected_name)
self.assertEqual(expected_volumes, volumes)
self.assertEqual(mock_cgroup['status'], model_update['status'])
mock_delete_volume.assert_called_with(self.driver, mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group has been destroyed."
)
self.driver.delete_consistencygroup(mock_context, mock_cgroup)
self.array.destroy_pgroup.assert_called_with(expected_name)
mock_delete_volume.assert_called_with(self.driver, mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group does not exist"
)
self.driver.delete_consistencygroup(mock_context, mock_cgroup)
self.array.destroy_pgroup.assert_called_with(expected_name)
mock_delete_volume.assert_called_with(self.driver, mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Some other error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_consistencygroup,
mock_context,
mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=500,
text="Another different error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_consistencygroup,
mock_context,
mock_volume)
self.array.destroy_pgroup.side_effect = None
self.assert_error_propagates(
[self.array.destroy_pgroup],
self.driver.delete_consistencygroup, mock_context, mock_cgroup)
def _create_mock_cg(self):
mock_group = mock.MagicMock()
mock_group.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
mock_group.status = "Available"
mock_group.cg_name = "consisgroup-" + mock_group.id + "-cinder"
return mock_group
def test_update_consistencygroup(self):
mock_group = self._create_mock_cg()
add_vols = [
{'name': 'vol1'},
{'name': 'vol2'},
{'name': 'vol3'},
]
expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols]
remove_vols = [
{'name': 'vol4'},
{'name': 'vol5'}
]
expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols]
self.driver.update_consistencygroup(mock.Mock(), mock_group,
add_vols, remove_vols)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=expected_addvollist,
remvollist=expected_remvollist
)
def test_update_consistencygroup_no_add_vols(self):
mock_group = self._create_mock_cg()
expected_addvollist = []
remove_vols = [
{'name': 'vol4'},
{'name': 'vol5'}
]
expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols]
self.driver.update_consistencygroup(mock.Mock(), mock_group,
None, remove_vols)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=expected_addvollist,
remvollist=expected_remvollist
)
def test_update_consistencygroup_no_remove_vols(self):
mock_group = self._create_mock_cg()
add_vols = [
{'name': 'vol1'},
{'name': 'vol2'},
{'name': 'vol3'},
]
expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols]
expected_remvollist = []
self.driver.update_consistencygroup(mock.Mock(), mock_group,
add_vols, None)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=expected_addvollist,
remvollist=expected_remvollist
)
def test_update_consistencygroup_no_vols(self):
mock_group = self._create_mock_cg()
self.driver.update_consistencygroup(mock.Mock(), mock_group,
None, None)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=[],
remvollist=[]
)
def test_create_cgsnapshot(self):
mock_cgsnap = mock.Mock()
mock_cgsnap.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
mock_cgsnap.consistencygroup_id = \
"4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_context = mock.Mock()
self.driver.db = mock.Mock()
mock_snap = mock.MagicMock()
expected_snaps = [mock_snap]
self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = \
expected_snaps
model_update, snapshots = \
self.driver.create_cgsnapshot(mock_context, mock_cgsnap)
expected_pgroup_name = \
pure._get_pgroup_name_from_id(mock_cgsnap.consistencygroup_id)
expected_snap_suffix = pure._get_pgroup_snap_suffix(mock_cgsnap)
self.array.create_pgroup_snapshot\
.assert_called_with(expected_pgroup_name,
suffix=expected_snap_suffix)
self.assertEqual({'status': 'available'}, model_update)
self.assertEqual(expected_snaps, snapshots)
self.assertEqual('available', mock_snap.status)
self.assert_error_propagates(
[self.array.create_pgroup_snapshot],
self.driver.create_cgsnapshot, mock_context, mock_cgsnap)
@mock.patch(DRIVER_PATH + "._get_pgroup_snap_name", autospec=True)
def test_delete_cgsnapshot(self, mock_get_snap_name):
snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \
"e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_get_snap_name.return_value = snap_name
mock_cgsnap = mock.Mock()
mock_cgsnap.status = 'deleted'
mock_context = mock.Mock()
mock_snap = mock.MagicMock()
expected_snaps = [mock_snap]
self.driver.db = mock.Mock()
self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = \
expected_snaps
model_update, snapshots = \
self.driver.delete_cgsnapshot(mock_context, mock_cgsnap)
self.array.destroy_pgroup.assert_called_with(snap_name)
self.assertEqual({'status': mock_cgsnap.status}, model_update)
self.assertEqual(expected_snaps, snapshots)
self.assertEqual('deleted', mock_snap.status)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group snapshot has been destroyed."
)
self.driver.delete_cgsnapshot(mock_context, mock_cgsnap)
self.array.destroy_pgroup.assert_called_with(snap_name)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group snapshot does not exist"
)
self.driver.delete_cgsnapshot(mock_context, mock_cgsnap)
self.array.destroy_pgroup.assert_called_with(snap_name)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Some other error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_cgsnapshot,
mock_context,
mock_cgsnap)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=500,
text="Another different error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_cgsnapshot,
mock_context,
mock_cgsnap)
self.array.destroy_pgroup.side_effect = None
self.assert_error_propagates(
[self.array.destroy_pgroup],
self.driver.delete_cgsnapshot, mock_context, mock_cgsnap)
def test_manage_existing(self):
ref_name = 'vol1'
volume_ref = {'name': ref_name}
self.array.list_volume_private_connections.return_value = []
vol_name = VOLUME['name'] + '-cinder'
self.driver.manage_existing(VOLUME, volume_ref)
self.array.list_volume_private_connections.assert_called_with(ref_name)
self.array.rename_volume.assert_called_with(ref_name, vol_name)
def test_manage_existing_error_propagates(self):
self.array.list_volume_private_connections.return_value = []
self.assert_error_propagates(
[self.array.list_volume_private_connections,
self.array.rename_volume],
self.driver.manage_existing,
VOLUME, {'name': 'vol1'}
)
def test_manage_existing_bad_ref(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'bad_key': 'bad_value'})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': ''})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': None})
self.array.get_volume.side_effect = \
self.purestorage_module.PureHTTPError(
text="Volume does not exist.",
code=400
)
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': 'non-existing-volume'})
def test_manage_existing_with_connected_hosts(self):
ref_name = 'vol1'
self.array.list_volume_private_connections.return_value = \
["host1", "host2"]
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': ref_name})
self.array.list_volume_private_connections.assert_called_with(ref_name)
self.assertFalse(self.array.rename_volume.called)
def test_manage_existing_get_size(self):
ref_name = 'vol1'
volume_ref = {'name': ref_name}
expected_size = 5
self.array.get_volume.return_value = {"size": 5368709120}
size = self.driver.manage_existing_get_size(VOLUME, volume_ref)
self.assertEqual(expected_size, size)
self.array.get_volume.assert_called_with(ref_name)
def test_manage_existing_get_size_error_propagates(self):
self.array.get_volume.return_value = mock.MagicMock()
self.assert_error_propagates([self.array.get_volume],
self.driver.manage_existing_get_size,
VOLUME, {'name': 'vol1'})
def test_manage_existing_get_size_bad_ref(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
VOLUME, {'bad_key': 'bad_value'})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
VOLUME, {'name': ''})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
VOLUME, {'name': None})
def test_unmanage(self):
vol_name = VOLUME['name'] + "-cinder"
unmanaged_vol_name = vol_name + "-unmanaged"
self.driver.unmanage(VOLUME)
self.array.rename_volume.assert_called_with(vol_name,
unmanaged_vol_name)
def test_unmanage_error_propagates(self):
self.assert_error_propagates([self.array.rename_volume],
self.driver.unmanage,
VOLUME)
def test_unmanage_with_deleted_volume(self):
vol_name = VOLUME['name'] + "-cinder"
unmanaged_vol_name = vol_name + "-unmanaged"
self.array.rename_volume.side_effect = \
self.purestorage_module.PureHTTPError(
text="Volume does not exist.",
code=400
)
self.driver.unmanage(VOLUME)
self.array.rename_volume.assert_called_with(vol_name,
unmanaged_vol_name)
| 44.311911
| 79
| 0.62821
|
6932ae3beccdf6e430399f8cd1c1d32073c17a47
| 2,855
|
py
|
Python
|
src/molecule/command/dependency.py
|
clickthisnick/molecule
|
3f640d7e5cff8fae12fe52aa9dc41bf3e19db8e5
|
[
"MIT"
] | null | null | null |
src/molecule/command/dependency.py
|
clickthisnick/molecule
|
3f640d7e5cff8fae12fe52aa9dc41bf3e19db8e5
|
[
"MIT"
] | null | null | null |
src/molecule/command/dependency.py
|
clickthisnick/molecule
|
3f640d7e5cff8fae12fe52aa9dc41bf3e19db8e5
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Dependency Command Module."""
import click
from molecule import logger
from molecule.command import base
LOG = logger.get_logger(__name__)
class Dependency(base.Base):
"""
Dependency Command Class.
.. program:: molecule dependency
.. option:: molecule dependency
Target the default scenario.
.. program:: molecule dependency --scenario-name foo
.. option:: molecule dependency --scenario-name foo
Targeting a specific scenario.
.. program:: molecule --debug dependency
.. option:: molecule --debug dependency
Executing with `debug`.
.. program:: molecule --base-config base.yml dependency
.. option:: molecule --base-config base.yml dependency
Executing with a `base-config`.
.. program:: molecule --env-file foo.yml dependency
.. option:: molecule --env-file foo.yml dependency
Load an env file to read variables from when rendering
molecule.yml.
"""
def execute(self):
"""
Execute the actions necessary to perform a `molecule dependency` and \
        return None.
:return: None
"""
self._config.dependency.execute()
@base.click_command_ex()
@click.pass_context
@click.option(
"--scenario-name",
"-s",
default=base.MOLECULE_DEFAULT_SCENARIO_NAME,
help="Name of the scenario to target. ({})".format(
base.MOLECULE_DEFAULT_SCENARIO_NAME
),
)
def dependency(ctx, scenario_name): # pragma: no cover
"""Manage the role's dependencies."""
args = ctx.obj.get("args")
subcommand = base._get_subcommand(__name__)
command_args = {"subcommand": subcommand}
base.execute_cmdline_scenarios(scenario_name, args, command_args)
| 30.698925
| 79
| 0.705429
|
55113bf1dce0eca4a87a1bfb84e4d77c51820556
| 1,676
|
py
|
Python
|
skywalking/plugins/sw_redis.py
|
fuhuo/skywalking-python
|
9cde2064409f4677df3bc1cda773e787522795cc
|
[
"Apache-2.0"
] | null | null | null |
skywalking/plugins/sw_redis.py
|
fuhuo/skywalking-python
|
9cde2064409f4677df3bc1cda773e787522795cc
|
[
"Apache-2.0"
] | null | null | null |
skywalking/plugins/sw_redis.py
|
fuhuo/skywalking-python
|
9cde2064409f4677df3bc1cda773e787522795cc
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from skywalking import Layer, Component
from skywalking.trace import tags
from skywalking.trace.context import get_context
from skywalking.trace.tags import Tag
def install():
from redis.connection import Connection
_send_command = Connection.send_command
def _sw_send_command(this: Connection, *args, **kwargs):
peer = "%s:%s" % (this.host, this.port)
op = args[0]
context = get_context()
        with context.new_exit_span(op="Redis/" + (op or "/"), peer=peer) as span:
span.layer = Layer.Cache
span.component = Component.Redis
res = _send_command(this, *args, **kwargs)
span.tag(Tag(key=tags.DbType, val="Redis"))
span.tag(Tag(key=tags.DbInstance, val=this.db))
span.tag(Tag(key=tags.DbStatement, val=op))
return res
Connection.send_command = _sw_send_command
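# Hedged usage sketch (not part of the upstream plugin): once install() has run
# (the agent normally calls it at start-up), ordinary redis-py calls are traced
# transparently. The host and port are made-up placeholders and a reachable Redis
# server is assumed.
def _example_traced_redis_call():
    install()
    import redis
    client = redis.Redis(host='127.0.0.1', port=6379)
    client.set('greeting', 'hello')  # goes through the patched send_command()
    return client.get('greeting')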
| 37.244444
| 77
| 0.708234
|
64741f45aa87afe54d903f7f1a23fb78c0b3a443
| 9,211
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20191101/route_table.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20191101/route_table.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20191101/route_table.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['RouteTable']
class RouteTable(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] disable_bgp_route_propagation: Whether to disable the routes learned by BGP on that route table. True means disable.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['disable_bgp_route_propagation'] = disable_bgp_route_propagation
__props__['id'] = id
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if route_table_name is None:
raise TypeError("Missing required property 'route_table_name'")
__props__['route_table_name'] = route_table_name
__props__['routes'] = routes
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['subnets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20150615:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160330:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170301:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteTable")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteTable, __self__).__init__(
'azure-nextgen:network/v20191101:RouteTable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteTable':
"""
Get an existing RouteTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return RouteTable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the route table resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> pulumi.Output[Optional[Sequence['outputs.RouteResponse']]]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@property
@pulumi.getter
def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
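# Illustrative usage sketch: a minimal Pulumi program declaring this resource,
# assuming the pulumi and pulumi-azure-nextgen packages are installed and Azure
# credentials are configured. The module path, resource group, and route values
# below are placeholders, not part of this generated SDK module.
#
#   import pulumi_azure_nextgen.network.v20191101 as network
#
#   route_table = network.RouteTable(
#       "example-route-table",
#       resource_group_name="example-rg",
#       route_table_name="example-route-table",
#       location="westus2",
#       routes=[{
#           "name": "default-route",
#           "address_prefix": "0.0.0.0/0",
#           "next_hop_type": "VirtualAppliance",
#           "next_hop_ip_address": "10.0.0.4",
#       }],
#   )
#
#   # Look up an existing route table by its provider ID:
#   existing = network.RouteTable.get("existing-route-table", id=route_table.id)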
| 51.172222
| 2,301
| 0.676474
|
12a08695236f36364e99104244d5337da97cc72d
| 742
|
py
|
Python
|
src/ham/radio/radio_zone.py
|
n2qzshce/ham_radio_sync
|
ffe95a4ef88375b5d5e4503cf770e884644536cd
|
[
"CNRI-Python",
"RSA-MD"
] | 8
|
2021-03-01T17:28:45.000Z
|
2022-02-10T02:19:41.000Z
|
src/ham/radio/radio_zone.py
|
n2qzshce/ham_radio_sync
|
ffe95a4ef88375b5d5e4503cf770e884644536cd
|
[
"CNRI-Python",
"RSA-MD"
] | 7
|
2021-03-31T03:37:12.000Z
|
2021-06-12T20:30:05.000Z
|
src/ham/radio/radio_zone.py
|
n2qzshce/ham_radio_sync
|
ffe95a4ef88375b5d5e4503cf770e884644536cd
|
[
"CNRI-Python",
"RSA-MD"
] | 1
|
2021-02-24T21:39:51.000Z
|
2021-02-24T21:39:51.000Z
|
from src.ham.util.data_column import DataColumn
class RadioZone:
def __init__(self, cols):
self.number = DataColumn(fmt_name='number', fmt_val=cols['number'], shape=int)
self.name = DataColumn(fmt_name='name', fmt_val=cols['name'], shape=str)
self._associated_channels = list()
self.cols = cols
def add_channel(self, radio_channel):
self._associated_channels.append(radio_channel)
def has_channels(self):
return len(self._associated_channels) > 0
@classmethod
def create_empty(cls):
col_vals = dict()
col_vals['number'] = ''
col_vals['name'] = ''
return cls(col_vals)
def headers(self):
raise Exception("Base method cannot be called!")
def output(self):
raise Exception("Base method cannot be called!")
| 24.733333
| 80
| 0.727763
|
715a6f11f58198a10851dcd7dab8c4e6743bd063
| 1,691
|
py
|
Python
|
mars/tensor/base/broadcast_arrays.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/tensor/base/broadcast_arrays.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/tensor/base/broadcast_arrays.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...core import ExecutableTuple
from ..utils import broadcast_shape
from ..datasource import tensor as astensor
from .broadcast_to import broadcast_to
def broadcast_arrays(*args, **kwargs):
"""
Broadcast any number of arrays against each other.
Parameters
----------
`*args` : array_likes
The tensors to broadcast.
Returns
-------
broadcasted : list of tensors
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([[1,2,3]])
>>> y = mt.array([[1],[2],[3]])
>>> mt.broadcast_arrays(x, y).execute()
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
"""
if kwargs:
raise TypeError('broadcast_arrays() got an unexpected keyword '
f'argument {next(iter(kwargs.keys()))!r}')
args = [astensor(arg) for arg in args]
shape = broadcast_shape(*[arg.shape for arg in args])
return ExecutableTuple([broadcast_to(a, shape) for a in args])
| 29.155172
| 74
| 0.630396
|
72359d4bf31a6843013e111f7c65c642e5589a5a
| 1,331
|
py
|
Python
|
rlpyt/ul/runners/minibatch_rl_replaysaver.py
|
traffic-lights/rlpyt
|
ec4689cddd55d98c037194685cfd6ca8e6785014
|
[
"MIT"
] | 2,122
|
2019-07-02T13:19:10.000Z
|
2022-03-22T09:59:42.000Z
|
rlpyt/ul/runners/minibatch_rl_replaysaver.py
|
traffic-lights/rlpyt
|
ec4689cddd55d98c037194685cfd6ca8e6785014
|
[
"MIT"
] | 206
|
2019-07-02T14:19:42.000Z
|
2022-02-15T02:34:28.000Z
|
rlpyt/ul/runners/minibatch_rl_replaysaver.py
|
traffic-lights/rlpyt
|
ec4689cddd55d98c037194685cfd6ca8e6785014
|
[
"MIT"
] | 369
|
2019-07-02T13:38:28.000Z
|
2022-03-28T11:16:39.000Z
|
import pickle
import os.path as osp
import time
from rlpyt.runners.minibatch_rl import MinibatchRl, MinibatchRlEval
from rlpyt.utils.logging import logger
from rlpyt.utils.prog_bar import ProgBarCounter
from rlpyt.ul.runners.envstep_runner import MinibatchRlEvalEnvStep
class ReplaySaverMixin:
def log_diagnostics(self, itr, *args, **kwargs):
if itr > 0:
logger.log("Saving replay buffer...")
cum_steps = (itr + 1) * self.sampler.batch_size * self.world_size
snapshot_mode = logger.get_snapshot_mode()
if snapshot_mode == "all":
filename = f"replaybuffer_{cum_steps}.pkl"
elif snapshot_mode == "last":
filename = "replaybuffer.pkl"
else:
raise NotImplementedError
filename = osp.join(logger.get_snapshot_dir(), filename)
with open(filename, "wb") as fh:
pickle.dump(self.algo.replay_buffer, fh, protocol=4)
logger.log("Replay buffer saved.")
super().log_diagnostics(itr, *args, **kwargs)
class MinibatchRlReplaySaver(ReplaySaverMixin, MinibatchRl):
pass
class MinibatchRlEvalReplaySaver(ReplaySaverMixin, MinibatchRlEval):
pass
class MinibatchRlEvalEnvStepReplaySaver(ReplaySaverMixin, MinibatchRlEvalEnvStep):
pass
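# Illustrative usage sketch: the mixin reads the snapshot mode from rlpyt's
# logger, so a run script normally wraps train() in a logger context. The algo,
# agent, and sampler objects and the numeric arguments below are placeholders.
#
#   from rlpyt.utils.logging.context import logger_context
#
#   runner = MinibatchRlReplaySaver(algo=algo, agent=agent, sampler=sampler,
#                                   n_steps=1e6, log_interval_steps=1e4)
#   with logger_context("./logs", run_ID=0, name="example_run",
#                       snapshot_mode="last"):
#       runner.train()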
| 31.690476
| 82
| 0.678437
|
97bcaf4db886b60a83cd2c60992b7acac11bb43b
| 2,255
|
py
|
Python
|
syft/grid/autoscale/gcloud.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | 1
|
2020-10-06T17:03:26.000Z
|
2020-10-06T17:03:26.000Z
|
syft/grid/autoscale/gcloud.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | null | null | null |
syft/grid/autoscale/gcloud.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | 1
|
2021-09-04T16:27:41.000Z
|
2021-09-04T16:27:41.000Z
|
"""To autoscale pygrid workers on Google Cloud Platfrom"""
import json
import IPython
import terrascript
import terrascript.provider
import terrascript.resource
from utils.script import terraform_script
from utils.notebook import terraform_notebook
class GoogleCloud:
"""This class defines automates the spinning up of Google Cloud Instances"""
def __init__(self, credentials, project_id, region):
"""
args:
credentials: Path to the credentials json file
project_id: project_id of your project in GCP
region: region of your GCP project
"""
self.credentials = credentials
self.project_id = project_id
self.region = region
self.config = terrascript.Terrascript()
self.config += terrascript.provider.google(
credentials=self.credentials, project=self.project_id, region=self.region
)
with open("main.tf.json", "w") as main_config:
json.dump(self.config, main_config, indent=2, sort_keys=False)
if IPython.get_ipython():
terraform_notebook.init()
else:
terraform_script.init()
def compute_instance(self, name, machine_type, zone, image_family):
"""
args:
name: name of the compute instance
machine_type: the type of machine
zone: zone of your GCP project
image_family: image of the OS
"""
self.config += terrascript.resource.google_compute_instance(
name,
name=name,
machine_type=machine_type,
zone=zone,
boot_disk={"initialize_params": {"image": image_family}},
network_interface={"network": "default", "access_config": {}},
)
with open("main.tf.json", "w") as main_config:
json.dump(self.config, main_config, indent=2, sort_keys=False)
if IPython.get_ipython():
terraform_notebook.apply()
else:
terraform_script.apply()
def destroy(self):
"""
args:
"""
if IPython.get_ipython():
terraform_notebook.destroy()
else:
terraform_script.destroy()
del self.credentials
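# Illustrative usage sketch; constructing the class immediately writes
# main.tf.json and runs `terraform init`, so the snippet is shown commented out
# and every value below is a placeholder.
#
#   cloud = GoogleCloud(
#       credentials="service-account.json",
#       project_id="my-gcp-project",
#       region="us-central1",
#   )
#   cloud.compute_instance(
#       name="pygrid-worker-1",
#       machine_type="e2-standard-2",
#       zone="us-central1-a",
#       image_family="ubuntu-2004-lts",
#   )
#   # Tear everything down when finished:
#   cloud.destroy()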
| 32.681159
| 85
| 0.615521
|
0cdda1bcd0b864fb922d7de72eff7253c7a1225f
| 8,972
|
py
|
Python
|
opentelemetry-sdk/tests/context/propagation/test_b3_format.py
|
ThePumpingLemma/opentelemetry-python
|
9ed98eb9320b9064e43c3b43ee7c4990eec3657a
|
[
"Apache-2.0"
] | null | null | null |
opentelemetry-sdk/tests/context/propagation/test_b3_format.py
|
ThePumpingLemma/opentelemetry-python
|
9ed98eb9320b9064e43c3b43ee7c4990eec3657a
|
[
"Apache-2.0"
] | null | null | null |
opentelemetry-sdk/tests/context/propagation/test_b3_format.py
|
ThePumpingLemma/opentelemetry-python
|
9ed98eb9320b9064e43c3b43ee7c4990eec3657a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import opentelemetry.sdk.context.propagation.b3_format as b3_format
import opentelemetry.sdk.trace as trace
import opentelemetry.trace as trace_api
from opentelemetry.trace.propagation import (
get_span_from_context,
set_span_in_context,
)
FORMAT = b3_format.B3Format()
def get_as_list(dict_object, key):
value = dict_object.get(key)
return [value] if value is not None else []
def get_child_parent_new_carrier(old_carrier):
ctx = FORMAT.extract(get_as_list, old_carrier)
parent_context = get_span_from_context(ctx).get_context()
parent = trace.Span("parent", parent_context)
child = trace.Span(
"child",
trace_api.SpanContext(
parent_context.trace_id,
trace.generate_span_id(),
trace_flags=parent_context.trace_flags,
trace_state=parent_context.trace_state,
),
parent=parent,
)
new_carrier = {}
ctx = set_span_in_context(child)
FORMAT.inject(dict.__setitem__, new_carrier, context=ctx)
return child, parent, new_carrier
class TestB3Format(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.serialized_trace_id = b3_format.format_trace_id(
trace.generate_trace_id()
)
cls.serialized_span_id = b3_format.format_span_id(
trace.generate_span_id()
)
cls.serialized_parent_id = b3_format.format_span_id(
trace.generate_span_id()
)
def test_extract_multi_header(self):
"""Test the extraction of B3 headers."""
child, parent, new_carrier = get_child_parent_new_carrier(
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.PARENT_SPAN_ID_KEY: self.serialized_parent_id,
FORMAT.SAMPLED_KEY: "1",
}
)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY],
b3_format.format_trace_id(child.context.trace_id),
)
self.assertEqual(
new_carrier[FORMAT.SPAN_ID_KEY],
b3_format.format_span_id(child.context.span_id),
)
self.assertEqual(
new_carrier[FORMAT.PARENT_SPAN_ID_KEY],
b3_format.format_span_id(parent.context.span_id),
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_extract_single_header(self):
"""Test the extraction from a single b3 header."""
child, parent, new_carrier = get_child_parent_new_carrier(
{
FORMAT.SINGLE_HEADER_KEY: "{}-{}".format(
self.serialized_trace_id, self.serialized_span_id
)
}
)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY],
b3_format.format_trace_id(child.context.trace_id),
)
self.assertEqual(
new_carrier[FORMAT.SPAN_ID_KEY],
b3_format.format_span_id(child.context.span_id),
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
child, parent, new_carrier = get_child_parent_new_carrier(
{
FORMAT.SINGLE_HEADER_KEY: "{}-{}-1-{}".format(
self.serialized_trace_id,
self.serialized_span_id,
self.serialized_parent_id,
)
}
)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY],
b3_format.format_trace_id(child.context.trace_id),
)
self.assertEqual(
new_carrier[FORMAT.SPAN_ID_KEY],
b3_format.format_span_id(child.context.span_id),
)
self.assertEqual(
new_carrier[FORMAT.PARENT_SPAN_ID_KEY],
b3_format.format_span_id(parent.context.span_id),
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_extract_header_precedence(self):
"""A single b3 header should take precedence over multiple
headers.
"""
single_header_trace_id = self.serialized_trace_id[:-3] + "123"
_, _, new_carrier = get_child_parent_new_carrier(
{
FORMAT.SINGLE_HEADER_KEY: "{}-{}".format(
single_header_trace_id, self.serialized_span_id
),
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: "1",
}
)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY], single_header_trace_id
)
def test_enabled_sampling(self):
"""Test b3 sample key variants that turn on sampling."""
for variant in ["1", "True", "true", "d"]:
_, _, new_carrier = get_child_parent_new_carrier(
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: variant,
}
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_disabled_sampling(self):
"""Test b3 sample key variants that turn off sampling."""
for variant in ["0", "False", "false", None]:
_, _, new_carrier = get_child_parent_new_carrier(
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: variant,
}
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "0")
def test_flags(self):
"""x-b3-flags set to "1" should result in propagation."""
_, _, new_carrier = get_child_parent_new_carrier(
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_flags_and_sampling(self):
"""Propagate if b3 flags and sampling are set."""
_, _, new_carrier = get_child_parent_new_carrier(
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_64bit_trace_id(self):
"""64 bit trace ids should be padded to 128 bit trace ids."""
trace_id_64_bit = self.serialized_trace_id[:16]
_, _, new_carrier = get_child_parent_new_carrier(
{
FORMAT.TRACE_ID_KEY: trace_id_64_bit,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY], "0" * 16 + trace_id_64_bit
)
def test_invalid_single_header(self):
"""If an invalid single header is passed, return an
invalid SpanContext.
"""
carrier = {FORMAT.SINGLE_HEADER_KEY: "0-1-2-3-4-5-6-7"}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_span_from_context(ctx).get_context()
self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID)
self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID)
def test_missing_trace_id(self):
"""If a trace id is missing, populate an invalid trace id."""
carrier = {
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_span_from_context(ctx).get_context()
self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID)
def test_missing_span_id(self):
"""If a trace id is missing, populate an invalid trace id."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.FLAGS_KEY: "1",
}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_span_from_context(ctx).get_context()
self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID)
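# Reference sketch of the two carrier shapes exercised above: the multi-header
# form uses separate x-b3-* keys, while the compact form packs
# "{trace_id}-{span_id}-{sampling_state}[-{parent_span_id}]" into a single
# header. The hex values are placeholders.
#
#   multi_header_carrier = {
#       FORMAT.TRACE_ID_KEY: "463ac35c9f6413ad48485a3953bb6124",
#       FORMAT.SPAN_ID_KEY: "0020000000000001",
#       FORMAT.SAMPLED_KEY: "1",
#   }
#   single_header_carrier = {
#       FORMAT.SINGLE_HEADER_KEY:
#           "463ac35c9f6413ad48485a3953bb6124-0020000000000001-1",
#   }
#   ctx = FORMAT.extract(get_as_list, single_header_carrier)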
| 34.910506
| 75
| 0.61926
|
edbda457253b500ad23db908c6a7742ecbb70099
| 5,336
|
py
|
Python
|
gazoo_device/capabilities/shell_ssh.py
|
dedsec-9/gazoo-device
|
5ed2867c258da80e53b6aae07ec7a65efe473a28
|
[
"Apache-2.0"
] | 14
|
2020-11-05T23:23:32.000Z
|
2022-03-01T18:59:29.000Z
|
gazoo_device/capabilities/shell_ssh.py
|
dedsec-9/gazoo-device
|
5ed2867c258da80e53b6aae07ec7a65efe473a28
|
[
"Apache-2.0"
] | 1
|
2021-06-24T19:20:50.000Z
|
2021-06-24T19:20:50.000Z
|
gazoo_device/capabilities/shell_ssh.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | 5
|
2021-05-20T22:52:51.000Z
|
2022-02-21T08:46:21.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common shell() capability for devices communicating over SSH."""
import time
from gazoo_device import config
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.capabilities.interfaces import shell_base
_SSH_CONNECTION_FAILURE_MARKERS = ["Connection to", "Connection reset"]
logger = gdm_logger.get_logger()
class ShellSSH(shell_base.ShellBase):
"""Common shell() method implementation for devices communicating over SSH."""
def __init__(self,
send_and_expect,
device_name,
shell_cmd=shell_base._SHELL_CMD,
shell_regex=shell_base._SHELL_REGEX,
tries=shell_base._TRIES,
timeout=shell_base._TIMEOUT,
failure_markers=None):
"""Initalize the SSH shell capability.
Args:
send_and_expect (method): bound send_and_expect method of the device
class instance.
device_name (str): name of the device using this capability.
shell_cmd (str): return code wrapper around the shell command to
execute.
shell_regex (str): shell regex to use. Must contain two capturing
groups: one for the output and one for the return code.
tries (int): how many times to try sending the shell command.
timeout (float): shell timeout in seconds.
failure_markers (list): list of markers (strings) indicating SSH
connection failure.
"""
super(ShellSSH, self).__init__(
send_and_expect=send_and_expect,
device_name=device_name,
shell_cmd=shell_cmd,
shell_regex=shell_regex,
tries=tries,
timeout=timeout)
if failure_markers is None:
failure_markers = _SSH_CONNECTION_FAILURE_MARKERS
self._failure_markers = failure_markers
def shell(self,
command,
command_name="shell",
timeout=None,
port=0,
include_return_code=False,
searchwindowsize=config.SEARCHWINDOWSIZE):
"""Sends command and returns response and optionally return code.
Args:
command (str): Command to send to the device.
command_name (str): Identifier for command.
timeout (float): Time in seconds to wait for device to respond.
port (int): Which port to send on, 0 or 1.
include_return_code (bool): flag indicating return code should be
returned.
searchwindowsize (int): Number of the last bytes to look at.
Raises:
DeviceError: if communication fails.
Note:
Can try multiple times as connection can sometimes fail.
See the init args for setting the number of retry attempts.
Returns:
str: If include_return_code is False return the device response to
the command.
tuple: If include_return_code is True return the device response and
return code.
"""
if timeout is None:
timeout = self._timeout
command = self._shell_cmd.format(cmd=command.rstrip())
logger.debug("{} sending {!r} to generate {} in {}s on port {}",
self._device_name, command, command_name, timeout, port)
for attempt in range(self._tries):
response = self._send_and_expect(
command, [self._shell_regex],
timeout=timeout,
port=port,
searchwindowsize=searchwindowsize,
expect_type="response")
if not response.timedout:
break
if (any(failure_marker in response.before
for failure_marker in self._failure_markers) and
attempt < self._tries - 1):
logger.warning(
"{}: SSH connection died with output {}. Trying again.".format(
self._device_name, response.before))
# SSH connection died. Retry.
time.sleep(.1)
else:
raise errors.DeviceError("Device {} shell failed for command {!r}. "
"Timed out waiting {}s for response. "
"Shell output: {!r}.".format(
self._device_name, command, timeout,
response.before))
result = response.match.group(1).strip()
return_code = int(response.match.group(2))
if include_return_code:
return result, return_code
else:
return result
def has_command(self, binary_name):
"""Returns if binary_name is installed on the device.
Args:
binary_name (str): name of the executable.
Returns:
bool: True if the executable is found on the device, False
otherwise.
"""
_, result_code = self.shell(
f'which {binary_name}\n', include_return_code=True)
return result_code == 0
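# Illustrative usage sketch: a device class typically constructs this capability
# with its own bound send_and_expect method; the device object and the shell
# commands below are placeholders.
#
#   ssh_shell = ShellSSH(
#       send_and_expect=device.switchboard.send_and_expect,
#       device_name=device.name)
#   kernel_info = ssh_shell.shell("uname -a")
#   listing, return_code = ssh_shell.shell("ls /tmp", include_return_code=True)
#   has_iperf = ssh_shell.has_command("iperf3")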
| 35.337748
| 80
| 0.648238
|
8af320f6a1698c3d5d4950ffb6cb92293b3f78fc
| 33,201
|
py
|
Python
|
byudml/profiler/profiler_primitive.py
|
byu-dml/d3m-primitives
|
f25d5d2572d0e7f29ecf2bef98ac2fc0613eb053
|
[
"MIT"
] | 4
|
2018-04-13T21:27:58.000Z
|
2020-10-08T00:46:19.000Z
|
byudml/profiler/profiler_primitive.py
|
byu-dml/d3m-primitives
|
f25d5d2572d0e7f29ecf2bef98ac2fc0613eb053
|
[
"MIT"
] | 62
|
2018-05-05T20:47:08.000Z
|
2022-02-10T16:54:10.000Z
|
byudml/profiler/profiler_primitive.py
|
byu-dml/d3m-primitives
|
f25d5d2572d0e7f29ecf2bef98ac2fc0613eb053
|
[
"MIT"
] | 2
|
2020-02-07T03:31:11.000Z
|
2021-03-18T19:22:20.000Z
|
"""
This code has been copied and modified from https://gitlab.com/datadrivendiscovery/common-primitives/-/blob/c170029e9a0f875af28c6b9af20adc90bd4df0bb/common_primitives/simple_profiler.py
"""
import collections
import copy
import os.path
import re
import typing
import multiprocessing as mp
import pickle
import sys
import zipfile
import numpy as np # type: ignore
import pandas as pd # type: ignore
from pandas.io import parsers as pandas_parsers # type: ignore
from sentence_transformers import SentenceTransformer
from d3m import container, exceptions, utils as d3m_utils
from d3m.base import utils as base_utils
from d3m.metadata import base as metadata_base, hyperparams as hyperparams_module, params
from d3m.primitive_interfaces import base, unsupervised_learning
from byudml import __profiler_path__, __version__
import common_primitives
from common_primitives import utils
__all__ = ('SimpleProfilerPrimitive',)
WHITESPACE_REGEX = re.compile(r'\s')
if hasattr(pandas_parsers, 'STR_NA_VALUES'):
NA_VALUES = pandas_parsers.STR_NA_VALUES
else:
# Backwards compatibility for Pandas before 1.0.0.
NA_VALUES = pandas_parsers._NA_VALUES
Inputs = container.DataFrame
Outputs = container.DataFrame
class Params(params.Params):
add_semantic_types: typing.Optional[typing.List[typing.List[str]]]
remove_semantic_types: typing.Optional[typing.List[typing.List[str]]]
class Hyperparams(hyperparams_module.Hyperparams):
detect_semantic_types = hyperparams_module.Set(
elements=hyperparams_module.Enumeration(
values=[
'http://schema.org/Boolean', 'https://metadata.datadrivendiscovery.org/types/CategoricalData',
'http://schema.org/Integer', 'http://schema.org/Float', 'http://schema.org/Text',
'https://metadata.datadrivendiscovery.org/types/FloatVector', 'http://schema.org/DateTime',
'https://metadata.datadrivendiscovery.org/types/UniqueKey',
'https://metadata.datadrivendiscovery.org/types/Attribute',
'https://metadata.datadrivendiscovery.org/types/Time',
'https://metadata.datadrivendiscovery.org/types/TrueTarget',
'https://metadata.datadrivendiscovery.org/types/UnknownType',
'https://metadata.datadrivendiscovery.org/types/PrimaryKey',
'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey',
],
default='http://schema.org/Boolean',
),
default=(
'http://schema.org/Boolean', 'https://metadata.datadrivendiscovery.org/types/CategoricalData',
'http://schema.org/Integer', 'http://schema.org/Float', 'http://schema.org/Text',
'https://metadata.datadrivendiscovery.org/types/FloatVector', 'http://schema.org/DateTime',
'https://metadata.datadrivendiscovery.org/types/UniqueKey',
'https://metadata.datadrivendiscovery.org/types/Attribute',
'https://metadata.datadrivendiscovery.org/types/Time',
'https://metadata.datadrivendiscovery.org/types/TrueTarget',
'https://metadata.datadrivendiscovery.org/types/UnknownType',
'https://metadata.datadrivendiscovery.org/types/PrimaryKey',
'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey',
),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of semantic types to detect and set. One can provide a subset of supported semantic types to limit what the primitive detects.",
)
remove_unknown_type = hyperparams_module.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Remove \"https://metadata.datadrivendiscovery.org/types/UnknownType\" semantic type from columns on which the primitive has detected other semantic types.",
)
categorical_max_absolute_distinct_values = hyperparams_module.Union[typing.Union[int, None]](
configuration=collections.OrderedDict(
limit=hyperparams_module.Bounded[int](
lower=1,
upper=None,
default=50,
),
unlimited=hyperparams_module.Hyperparameter[None](
default=None,
description='No absolute limit on distinct values.',
),
),
default='limit',
        description='The maximum absolute number of distinct values (all missing values are counted as one distinct value) for a column to be considered categorical.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
categorical_max_ratio_distinct_values = hyperparams_module.Bounded[float](
lower=0,
upper=1,
default=0.05,
        description='The maximum ratio of distinct values (all missing values are counted as one distinct value) vs. number of rows for a column to be considered categorical.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
nan_values = hyperparams_module.Set(
elements=hyperparams_module.Hyperparameter[str](''),
default=sorted(NA_VALUES),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of strings to recognize as NaNs when detecting a float column.",
)
text_min_ratio_values_with_whitespace = hyperparams_module.Bounded[float](
lower=0,
upper=1,
default=0.5,
description='The minimum ratio of values with any whitespace (after first stripping) vs. number of rows for a column to be considered a text column.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
use_columns = hyperparams_module.Set(
elements=hyperparams_module.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to force primitive to operate on. If any specified column cannot be detected, it is skipped.",
)
exclude_columns = hyperparams_module.Set(
elements=hyperparams_module.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
)
return_result = hyperparams_module.Enumeration(
values=['append', 'replace', 'new'],
default='replace',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Should detected columns be appended, should they replace original columns, or should only detected columns be returned?",
)
add_index_columns = hyperparams_module.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
)
replace_index_columns = hyperparams_module.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Replace primary index columns even if otherwise appending columns. Applicable only if \"return_result\" is set to \"append\".",
)
class SemanticProfilerPrimitive(unsupervised_learning.UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
A primitive which determines missing semantic types for columns and adds
    them automatically. It combines a learned model over natural-language
    embeddings of column names with a set of hard-coded rules/heuristics to
    determine semantic types. Feel free to propose improvements.
Besides determining column types it also determines some column roles.
Some rules are intuitive and expected, but there are also few special behaviors
(if not disabled by not providing a corresponding semantic type in
``detect_semantic_types``):
* If a column does not have any semantic types,
``https://metadata.datadrivendiscovery.org/types/UnknownType`` semantic type
is first set for the column. If any other semantic type is set later on as
part of logic of this primitive, the
``https://metadata.datadrivendiscovery.org/types/UnknownType`` is removed
(including if the column originally came with this semantic type).
* If a column has ``https://metadata.datadrivendiscovery.org/types/SuggestedTarget``
semantic type and no other column (even those not otherwise operated on by
      the primitive) has a semantic type
      ``https://metadata.datadrivendiscovery.org/types/TrueTarget``, then
      ``https://metadata.datadrivendiscovery.org/types/TrueTarget`` is set on
the column. This allows operation on data without a problem description.
This is only for the first such column.
* All other columns which are missing semantic types initially we set as
``https://metadata.datadrivendiscovery.org/types/Attribute``.
* Any column with ``http://schema.org/DateTime`` semantic type is also set
as ``https://metadata.datadrivendiscovery.org/types/Time`` semantic type.
* ``https://metadata.datadrivendiscovery.org/types/PrimaryKey`` or
``https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey`` is set only
if no other column (even those not otherwise operated on by
the primitive) is a primary key, and set based on the column name: only
when it is ``d3mIndex``.
"""
__author__ = 'Brandon Schoenfeld'
_weights_configs = [
{
'type': 'FILE',
'key': 'distilbert-base-nli-stsb-mean-tokens.zip',
'file_uri': 'https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/v0.2/distilbert-base-nli-stsb-mean-tokens.zip',
'file_digest': '9d3dfdf353743741a4be36250868636d377b0dcc16067bfcdfc192fe7239d7c8',
},
]
metadata = metadata_base.PrimitiveMetadata({
'id': 'af214333-e67b-4e59-a49b-b16f5501a925',
'version': __version__,
'name': 'Semantic Profiler',
        'description': 'This primitive is an adaptation of the d3m common profiler (https://gitlab.com/datadrivendiscovery/common-primitives/-/blob/c170029e9a0f875af28c6b9af20adc90bd4df0bb/common_primitives/simple_profiler.py). It predicts semantic column types using natural language embeddings of the column name. The internal model uses these embeddings to predict the semantic types found in the dataset annotations created by MIT Lincoln Labs.',
'python_path': __profiler_path__,
'source': {
'name': 'byu-dml',
'contact': 'mailto:bjschoenfeld@gmail.com',
'uris': [
'https://github.com/byu-dml/d3m-primitives'
],
},
'installation': [
{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package': 'byudml',
'version': __version__,
},
] + _weights_configs,
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.DATA_PROFILING,
],
'primitive_family': metadata_base.PrimitiveFamily.SCHEMA_DISCOVERY,
})
def __init__(self, *, hyperparams: Hyperparams, volumes: typing.Optional[typing.Dict[str, str]]=None) -> None:
super().__init__(hyperparams=hyperparams, volumes=volumes)
self._training_inputs: Inputs = None
self._add_semantic_types: typing.List[typing.List[str]] = None
self._remove_semantic_types: typing.List[typing.List[str]] = None
self._fitted: bool = False
self._emb_model = self._init_embedding_model()
self._profiler_model = self._init_profiler_model()
def _init_embedding_model(self) -> SentenceTransformer:
weights_path = self._find_weights_path(self._weights_configs[0]['key'])
weights_path = self._extract_weights(weights_path)
with d3m_utils.silence():
emb_model = SentenceTransformer(weights_path)
return emb_model
def _find_weights_path(self, key_filename):
if key_filename in self.volumes:
weight_file_path = self.volumes[key_filename]
else:
            weight_file_path = os.path.join('.', self._weights_configs[0]['file_digest'], key_filename)
if not os.path.isfile(weight_file_path):
raise ValueError(
"Can't get weights file from volumes by key '{key_filename}' and at path '{path}'.".format(
key_filename=key_filename,
path=weight_file_path,
),
)
return weight_file_path
def _extract_weights(self, weights_path):
extracted_weights_path = weights_path[:-4] # remove .zip
if not os.path.isfile(extracted_weights_path):
with zipfile.ZipFile(weights_path, 'r') as zf:
zf.extractall(extracted_weights_path)
return extracted_weights_path
def _init_profiler_model(self):
zip_model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model.zip')
model_path = os.path.join(self._extract_weights(zip_model_path), 'model.bin')
with open(model_path, 'rb') as f:
profiler_model = pickle.load(f)
return profiler_model
def _predict_semantic_type(self, input_column: container.DataFrame) -> str:
column_name = input_column.metadata.query(('ALL_ELEMENTS', 0))['name']
with d3m_utils.silence():
column_name_emb = self._emb_model.encode([column_name.lower()], show_progress_bar=False)
prediction = self._profiler_model.predict(column_name_emb)
assert prediction.shape[0] == 1
return prediction[0]
def set_training_data(self, *, inputs: Inputs) -> None:
self._training_inputs = inputs
self._fitted = False
self._dataset_name = '' # todo
self._dataset_description = '' # todo
def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:
# The logic of detecting values tries to mirror also the logic of parsing
# values in "ColumnParserPrimitive". One should keep them in sync.
if self._training_inputs is None:
raise exceptions.InvalidStateError("Missing training data.")
self._add_semantic_types, self._remove_semantic_types = self._fit_columns(self._training_inputs)
self._fitted = True
return base.CallResult(None)
def _fit_columns(self, inputs: Inputs) -> typing.Tuple[typing.List[typing.List[str]], typing.List[typing.List[str]]]:
true_target_columns = inputs.metadata.list_columns_with_semantic_types(['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
index_columns = inputs.metadata.get_index_columns()
# Target and index columns should be set only once, if they are set.
has_set_target_columns = False
has_set_index_column = False
columns_to_use = self._get_columns(inputs.metadata)
fitted_add_semantic_types = []
fitted_remove_semantic_types = []
for column_index in columns_to_use:
input_column = inputs.select_columns([column_index])
column_metadata = inputs.metadata.query_column(column_index)
column_name = column_metadata.get('name', str(column_index))
column_semantic_types = list(column_metadata.get('semantic_types', []))
# We might be here because column has a known type, but it has "https://metadata.datadrivendiscovery.org/types/SuggestedTarget" set.
has_unknown_type = not column_semantic_types or 'https://metadata.datadrivendiscovery.org/types/UnknownType' in column_semantic_types
# A normalized copy of semantic types, which always includes unknown type.
normalized_column_semantic_types = copy.copy(column_semantic_types)
            # If we are processing this column and it does not yet have the semantic type indicating that its semantic types are unknown,
# we first set it, to normalize the input semantic types. If we will add any other semantic type,
# we will then remove this semantic type.
if has_unknown_type \
and 'https://metadata.datadrivendiscovery.org/types/UnknownType' in self.hyperparams['detect_semantic_types'] \
and 'https://metadata.datadrivendiscovery.org/types/UnknownType' not in normalized_column_semantic_types:
normalized_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/UnknownType')
# A working copy of semantic types.
new_column_semantic_types = copy.copy(normalized_column_semantic_types)
if has_unknown_type:
is_float = self._is_float(input_column)
is_integer = self._is_integer(input_column)
# If it looks like proper float (so not integer encoded as float), then we do not detect it as boolean.
if self._is_boolean(input_column) \
and (not is_float or is_integer) \
and 'http://schema.org/Boolean' in self.hyperparams['detect_semantic_types'] \
and 'http://schema.org/Boolean' not in new_column_semantic_types:
new_column_semantic_types.append('http://schema.org/Boolean')
# If it looks like proper float (so not integer encoded as float), then we do not detect it as categorical.
elif self._is_categorical(input_column) \
and (not is_float or is_integer) \
and 'https://metadata.datadrivendiscovery.org/types/CategoricalData' in self.hyperparams['detect_semantic_types'] \
and 'https://metadata.datadrivendiscovery.org/types/CategoricalData' not in new_column_semantic_types:
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/CategoricalData')
elif is_integer \
and 'http://schema.org/Integer' in self.hyperparams['detect_semantic_types'] \
and 'http://schema.org/Integer' not in new_column_semantic_types:
new_column_semantic_types.append('http://schema.org/Integer')
elif is_float \
and 'http://schema.org/Float' in self.hyperparams['detect_semantic_types'] \
and 'http://schema.org/Float' not in new_column_semantic_types:
new_column_semantic_types.append('http://schema.org/Float')
elif self._is_float_vector(input_column) \
and 'https://metadata.datadrivendiscovery.org/types/FloatVector' in self.hyperparams['detect_semantic_types'] \
and 'https://metadata.datadrivendiscovery.org/types/FloatVector' not in new_column_semantic_types:
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/FloatVector')
elif self._is_datetime(input_column) \
and 'http://schema.org/DateTime' in self.hyperparams['detect_semantic_types'] \
and 'http://schema.org/DateTime' not in new_column_semantic_types:
new_column_semantic_types.append('http://schema.org/DateTime')
elif self._is_text(input_column) \
and 'http://schema.org/Text' in self.hyperparams['detect_semantic_types'] \
and 'http://schema.org/Text' not in new_column_semantic_types:
new_column_semantic_types.append('http://schema.org/Text')
if 'https://metadata.datadrivendiscovery.org/types/UniqueKey' in self.hyperparams['detect_semantic_types'] \
and self._is_unique_key(input_column) \
and 'http://schema.org/Text' not in new_column_semantic_types \
and 'https://metadata.datadrivendiscovery.org/types/UniqueKey' not in new_column_semantic_types:
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/UniqueKey')
if not true_target_columns \
and not has_set_target_columns \
and 'https://metadata.datadrivendiscovery.org/types/TrueTarget' in self.hyperparams['detect_semantic_types'] \
and 'https://metadata.datadrivendiscovery.org/types/SuggestedTarget' in new_column_semantic_types:
# It should not be set because there are no columns with this semantic type in whole DataFrame.
assert 'https://metadata.datadrivendiscovery.org/types/TrueTarget' not in new_column_semantic_types
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if 'https://metadata.datadrivendiscovery.org/types/Target' not in new_column_semantic_types:
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/Target')
if 'https://metadata.datadrivendiscovery.org/types/Attribute' in new_column_semantic_types:
new_column_semantic_types.remove('https://metadata.datadrivendiscovery.org/types/Attribute')
has_set_target_columns = True
if has_unknown_type:
if not index_columns and not has_set_index_column:
if 'https://metadata.datadrivendiscovery.org/types/PrimaryKey' in self.hyperparams['detect_semantic_types'] \
and column_name == 'd3mIndex' \
and 'https://metadata.datadrivendiscovery.org/types/UniqueKey' in new_column_semantic_types:
# It should not be set because there are no columns with this semantic type in whole DataFrame.
assert 'https://metadata.datadrivendiscovery.org/types/PrimaryKey' not in new_column_semantic_types
assert 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey' not in new_column_semantic_types
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/PrimaryKey')
new_column_semantic_types.remove('https://metadata.datadrivendiscovery.org/types/UniqueKey')
if 'https://metadata.datadrivendiscovery.org/types/Attribute' in new_column_semantic_types:
new_column_semantic_types.remove('https://metadata.datadrivendiscovery.org/types/Attribute')
has_set_index_column = True
elif 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey' in self.hyperparams['detect_semantic_types'] \
and column_name == 'd3mIndex':
assert 'https://metadata.datadrivendiscovery.org/types/UniqueKey' not in new_column_semantic_types
# It should not be set because there are no columns with this semantic type in whole DataFrame.
assert 'https://metadata.datadrivendiscovery.org/types/PrimaryKey' not in new_column_semantic_types
assert 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey' not in new_column_semantic_types
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')
if 'https://metadata.datadrivendiscovery.org/types/Attribute' in new_column_semantic_types:
new_column_semantic_types.remove('https://metadata.datadrivendiscovery.org/types/Attribute')
has_set_index_column = True
if 'https://metadata.datadrivendiscovery.org/types/Attribute' in self.hyperparams['detect_semantic_types'] \
and 'https://metadata.datadrivendiscovery.org/types/TrueTarget' not in new_column_semantic_types \
and 'https://metadata.datadrivendiscovery.org/types/PrimaryKey' not in new_column_semantic_types \
and 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey' not in new_column_semantic_types \
and 'https://metadata.datadrivendiscovery.org/types/Attribute' not in new_column_semantic_types:
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/Attribute')
if 'https://metadata.datadrivendiscovery.org/types/Time' in self.hyperparams['detect_semantic_types'] \
and 'http://schema.org/DateTime' in new_column_semantic_types \
and 'https://metadata.datadrivendiscovery.org/types/Time' not in new_column_semantic_types:
new_column_semantic_types.append('https://metadata.datadrivendiscovery.org/types/Time')
# Have we added any other semantic type besides unknown type?
if new_column_semantic_types != normalized_column_semantic_types:
if self.hyperparams['remove_unknown_type'] and 'https://metadata.datadrivendiscovery.org/types/UnknownType' in new_column_semantic_types:
new_column_semantic_types.remove('https://metadata.datadrivendiscovery.org/types/UnknownType')
new_column_semantic_types_set = set(new_column_semantic_types)
column_semantic_types_set = set(column_semantic_types)
fitted_add_semantic_types.append(sorted(new_column_semantic_types_set - column_semantic_types_set))
fitted_remove_semantic_types.append(sorted(column_semantic_types_set - new_column_semantic_types_set))
assert len(fitted_add_semantic_types) == len(columns_to_use)
assert len(fitted_remove_semantic_types) == len(columns_to_use)
return fitted_add_semantic_types, fitted_remove_semantic_types
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
if not self._fitted:
raise exceptions.PrimitiveNotFittedError("Primitive not fitted.")
assert self._add_semantic_types is not None
assert self._remove_semantic_types is not None
columns_to_use, output_columns = self._produce_columns(inputs, self._add_semantic_types, self._remove_semantic_types)
if self.hyperparams['replace_index_columns'] and self.hyperparams['return_result'] == 'append':
assert len(columns_to_use) == len(output_columns)
index_columns = inputs.metadata.get_index_columns()
index_columns_to_use = []
other_columns_to_use = []
index_output_columns = []
other_output_columns = []
for column_to_use, output_column in zip(columns_to_use, output_columns):
if column_to_use in index_columns:
index_columns_to_use.append(column_to_use)
index_output_columns.append(output_column)
else:
other_columns_to_use.append(column_to_use)
other_output_columns.append(output_column)
outputs = base_utils.combine_columns(inputs, index_columns_to_use, index_output_columns, return_result='replace', add_index_columns=self.hyperparams['add_index_columns'])
outputs = base_utils.combine_columns(outputs, other_columns_to_use, other_output_columns, return_result='append', add_index_columns=self.hyperparams['add_index_columns'])
else:
outputs = base_utils.combine_columns(inputs, columns_to_use, output_columns, return_result=self.hyperparams['return_result'], add_index_columns=self.hyperparams['add_index_columns'])
return base.CallResult(outputs)
def _can_use_column(self, inputs_metadata: metadata_base.DataMetadata, column_index: int) -> bool:
column_metadata = inputs_metadata.query_column(column_index)
semantic_types = column_metadata.get('semantic_types', [])
# We detect only on columns which have no semantic types or
# where it is explicitly set as unknown.
if not semantic_types or 'https://metadata.datadrivendiscovery.org/types/UnknownType' in semantic_types:
return True
# A special case to handle setting "https://metadata.datadrivendiscovery.org/types/TrueTarget".
if 'https://metadata.datadrivendiscovery.org/types/SuggestedTarget' in semantic_types:
return True
return False
def _get_columns(self, inputs_metadata: metadata_base.DataMetadata) -> typing.List[int]:
def can_use_column(column_index: int) -> bool:
return self._can_use_column(inputs_metadata, column_index)
columns_to_use, columns_not_to_use = base_utils.get_columns_to_use(inputs_metadata, self.hyperparams['use_columns'], self.hyperparams['exclude_columns'], can_use_column)
# We are OK if no columns ended up being parsed.
# "base_utils.combine_columns" will throw an error if it cannot work with this.
if self.hyperparams['use_columns'] and columns_not_to_use:
self.logger.warning("Not all specified columns can parsed. Skipping columns: %(columns)s", {
'columns': columns_not_to_use,
})
return columns_to_use
def _produce_columns(
self, inputs: Inputs,
add_semantic_types: typing.List[typing.List[str]],
remove_semantic_types: typing.List[typing.List[str]],
) -> typing.Tuple[typing.List[int], typing.List[Outputs]]:
columns_to_use = self._get_columns(inputs.metadata)
assert len(add_semantic_types) == len(remove_semantic_types), (len(add_semantic_types), len(remove_semantic_types))
if len(columns_to_use) != len(add_semantic_types):
raise exceptions.InvalidStateError("Producing on a different number of columns than fitting.")
output_columns = []
for column_index, column_add_semantic_types, column_remove_semantic_types in zip(columns_to_use, add_semantic_types, remove_semantic_types):
output_column = inputs.select_columns([column_index])
for remove_semantic_type in column_remove_semantic_types:
output_column.metadata = output_column.metadata.remove_semantic_type((metadata_base.ALL_ELEMENTS, 0), remove_semantic_type)
for add_semantic_type in column_add_semantic_types:
output_column.metadata = output_column.metadata.add_semantic_type((metadata_base.ALL_ELEMENTS, 0), add_semantic_type)
output_columns.append(output_column)
assert len(output_columns) == len(columns_to_use)
return columns_to_use, output_columns
def _is_boolean(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'boolean'
def _is_categorical(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'categorical'
def _is_integer(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'integer'
def _is_text(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'string'
def _is_datetime(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'dateTime'
def _is_float(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'real'
def _is_float_vector(self, input_column: container.DataFrame) -> bool:
return self._predict_semantic_type(input_column) == 'realVector'
def _is_unique_key(self, input_column: container.DataFrame) -> bool:
column_values = input_column.iloc[:, 0]
# There should be at least one row. This prevents a degenerate case
# where we would mark a column of no rows as a unique key column.
# (Otherwise we also get division by zero below.)
if not len(column_values):
return False
# Here we look at every value as-is. Even empty strings and other missing/nan values.
if any(input_column.duplicated()):
return False
return True
def get_params(self) -> Params:
if not self._fitted:
return Params(
add_semantic_types=None,
remove_semantic_types=None,
)
return Params(
add_semantic_types=self._add_semantic_types,
remove_semantic_types=self._remove_semantic_types,
)
def set_params(self, *, params: Params) -> None:
self._add_semantic_types = params['add_semantic_types']
self._remove_semantic_types = params['remove_semantic_types']
self._fitted = all(param is not None for param in params.values())
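# Illustrative fit/produce sketch following the d3m primitive interface; the
# volumes path must point at the downloaded sentence-transformer weights named
# in _weights_configs, and `dataframe` stands for any d3m container.DataFrame.
#
#   hyperparams = Hyperparams.defaults()
#   profiler = SemanticProfilerPrimitive(
#       hyperparams=hyperparams,
#       volumes={'distilbert-base-nli-stsb-mean-tokens.zip':
#                '/path/to/distilbert-base-nli-stsb-mean-tokens.zip'},
#   )
#   profiler.set_training_data(inputs=dataframe)
#   profiler.fit()
#   profiled = profiler.produce(inputs=dataframe).value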
| 55.427379
| 457
| 0.686756
|
dbf166d6a31ffbfe8bc13d910a173904c17e6264
| 391
|
py
|
Python
|
laundry/wsgi.py
|
adam-thomas/imperial-painter-laundry
|
47d9567950a1e4f42c6b26e12a94e7f382556fb4
|
[
"MIT"
] | null | null | null |
laundry/wsgi.py
|
adam-thomas/imperial-painter-laundry
|
47d9567950a1e4f42c6b26e12a94e7f382556fb4
|
[
"MIT"
] | 8
|
2020-02-12T00:02:24.000Z
|
2022-02-10T08:42:12.000Z
|
laundry/wsgi.py
|
adam-thomas/imperial-painter-laundry
|
47d9567950a1e4f42c6b26e12a94e7f382556fb4
|
[
"MIT"
] | null | null | null |
"""
WSGI config for laundry project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "laundry.settings")
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
8e84f71a1a3d26db9062e4fb05a0d62f0809cd0c
| 18,587
|
py
|
Python
|
ppdet/engine/tracker.py
|
ReverseSacle/FairMOT-Paddle-Tracker_Basic
|
b1caa291a85131683620fc93fe7bed3fbefcc791
|
[
"MIT"
] | 3
|
2021-08-04T06:37:13.000Z
|
2022-03-21T09:07:29.000Z
|
ppdet/engine/tracker.py
|
ReverseSacle/FairMOT-Paddle-Tracker_Basic
|
b1caa291a85131683620fc93fe7bed3fbefcc791
|
[
"MIT"
] | null | null | null |
ppdet/engine/tracker.py
|
ReverseSacle/FairMOT-Paddle-Tracker_Basic
|
b1caa291a85131683620fc93fe7bed3fbefcc791
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import glob
import paddle
import numpy as np
from ppdet.core.workspace import create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
from ppdet.modeling.mot.utils import Timer, load_det_results
from ppdet.modeling.mot import visualization as mot_vis
from ppdet.metrics import Metric, MOTMetric
import ppdet.utils.stats as stats
from .callbacks import Callback, ComposeCallback
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['Tracker']
class Tracker(object):
def __init__(self, cfg, mode='eval'):
self.cfg = cfg
assert mode.lower() in ['test', 'eval'], \
"mode should be 'test' or 'eval'"
self.mode = mode.lower()
self.optimizer = None
# build MOT data loader
self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]
# build model
self.model = create(cfg.architecture)
self.status = {}
self.start_epoch = 0
# initial default callbacks
self._init_callbacks()
# initial default metrics
self._init_metrics()
self._reset_metrics()
def _init_callbacks(self):
self._callbacks = []
self._compose_callback = None
def _init_metrics(self):
if self.mode in ['test']:
self._metrics = []
return
if self.cfg.metric == 'MOT':
self._metrics = [MOTMetric(), ]
else:
logger.warning("Metric not support for metric type {}".format(
self.cfg.metric))
self._metrics = []
def _reset_metrics(self):
for metric in self._metrics:
metric.reset()
def register_callbacks(self, callbacks):
callbacks = [h for h in list(callbacks) if h is not None]
for c in callbacks:
assert isinstance(c, Callback), \
"metrics shoule be instances of subclass of Metric"
self._callbacks.extend(callbacks)
self._compose_callback = ComposeCallback(self._callbacks)
def register_metrics(self, metrics):
metrics = [m for m in list(metrics) if m is not None]
for m in metrics:
assert isinstance(m, Metric), \
"metrics shoule be instances of subclass of Metric"
self._metrics.extend(metrics)
def load_weights_jde(self, weights):
load_weight(self.model, weights, self.optimizer)
def load_weights_sde(self, det_weights, reid_weights):
if self.model.detector:
load_weight(self.model.detector, det_weights)
load_weight(self.model.reid, reid_weights)
else:
load_weight(self.model.reid, reid_weights, self.optimizer)
def _eval_seq_jde(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
draw_threshold=0):
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
tracker = self.model.tracker
tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)
timer = Timer()
results = []
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
for step_id, data in enumerate(dataloader):
self.status['step_id'] = step_id
if frame_id % 40 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(
frame_id, 1. / max(1e-5, timer.average_time)))
# forward
timer.tic()
pred_dets, pred_embs = self.model(data)
online_targets = self.model.tracker.update(pred_dets, pred_embs)
online_tlwhs, online_ids = [], []
online_scores = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
if tscore < draw_threshold: continue
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > tracker.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_scores.append(tscore)
timer.toc()
# save results
results.append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
self.save_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def _eval_seq_sde(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
det_file='',
draw_threshold=0):
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
tracker = self.model.tracker
use_detector = False if not self.model.detector else True
timer = Timer()
results = []
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
self.model.reid.eval()
if not use_detector:
dets_list = load_det_results(det_file, len(dataloader))
logger.info('Finish loading detection results file {}.'.format(
det_file))
for step_id, data in enumerate(dataloader):
self.status['step_id'] = step_id
if frame_id % 40 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(
frame_id, 1. / max(1e-5, timer.average_time)))
ori_image = data['ori_image']
input_shape = data['image'].shape[2:]
im_shape = data['im_shape']
scale_factor = data['scale_factor']
timer.tic()
if not use_detector:
dets = dets_list[frame_id]
bbox_tlwh = paddle.to_tensor(dets['bbox'], dtype='float32')
pred_scores = paddle.to_tensor(dets['score'], dtype='float32')
                # pred_scores may hold one score per detection, so skip the frame
                # only when no detection reaches the drawing threshold
                if float(paddle.max(pred_scores)) < draw_threshold: continue
if bbox_tlwh.shape[0] > 0:
pred_bboxes = paddle.concat(
(bbox_tlwh[:, 0:2],
bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
axis=1)
else:
pred_bboxes = []
pred_scores = []
else:
outs = self.model.detector(data)
if outs['bbox_num'] > 0:
pred_bboxes = scale_coords(outs['bbox'][:, 2:], input_shape,
im_shape, scale_factor)
pred_scores = outs['bbox'][:, 1:2]
else:
pred_bboxes = []
pred_scores = []
pred_bboxes = clip_box(pred_bboxes, input_shape, im_shape,
scale_factor)
bbox_tlwh = paddle.concat(
(pred_bboxes[:, 0:2],
pred_bboxes[:, 2:4] - pred_bboxes[:, 0:2] + 1),
axis=1)
crops, pred_scores = get_crops(
pred_bboxes, ori_image, pred_scores, w=64, h=192)
crops = paddle.to_tensor(crops)
pred_scores = paddle.to_tensor(pred_scores)
data.update({'crops': crops})
features = self.model(data)
features = features.numpy()
detections = [
Detection(tlwh, score, feat)
for tlwh, score, feat in zip(bbox_tlwh, pred_scores, features)
]
self.model.tracker.predict()
online_targets = self.model.tracker.update(detections)
online_tlwhs = []
online_scores = []
online_ids = []
for track in online_targets:
if not track.is_confirmed() or track.time_since_update > 1:
continue
online_tlwhs.append(track.to_tlwh())
online_scores.append(1.0)
online_ids.append(track.track_id)
timer.toc()
# save results
results.append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
self.save_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def mot_evaluate(self,
data_root,
seqs,
output_dir,
data_type='mot',
model_type='JDE',
save_images=False,
save_videos=False,
show_image=False,
det_results_dir=''):
if not os.path.exists(output_dir): os.makedirs(output_dir)
result_root = os.path.join(output_dir, 'mot_results')
if not os.path.exists(result_root): os.makedirs(result_root)
assert data_type in ['mot', 'kitti'], \
"data_type should be 'mot' or 'kitti'"
assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
"model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
# run tracking
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
save_dir = os.path.join(output_dir, 'mot_outputs',
seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
infer_dir = os.path.join(data_root, seq, 'img1')
images = self.get_infer_images(infer_dir)
self.dataset.set_images(images)
dataloader = create('EvalMOTReader')(self.dataset, 0)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
            with open(os.path.join(data_root, seq, 'seqinfo.ini')) as seqinfo_file:
                meta_info = seqinfo_file.read()
frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
meta_info.find('\nseqLength')])
with paddle.no_grad():
if model_type in ['JDE', 'FairMOT']:
results, nf, ta, tc = self._eval_seq_jde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate)
elif model_type in ['DeepSORT']:
results, nf, ta, tc = self._eval_seq_sde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
det_file=os.path.join(det_results_dir,
'{}.txt'.format(seq)))
else:
raise ValueError(model_type)
self.write_mot_results(result_filename, results, data_type)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
if save_videos:
output_video_path = os.path.join(save_dir, '..',
'{}_vis.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
save_dir, output_video_path)
os.system(cmd_str)
                logger.info('Saved video to {}.'.format(output_video_path))
logger.info('Evaluate seq: {}'.format(seq))
# update metrics
for metric in self._metrics:
metric.update(data_root, seq, data_type, result_root,
result_filename)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
all_time, 1.0 / avg_time))
# accumulate metric to log out
for metric in self._metrics:
metric.accumulate()
metric.log()
# reset metric states for metric may performed multiple times
self._reset_metrics()
def get_infer_images(self, infer_dir):
assert infer_dir is None or os.path.isdir(infer_dir), \
"{} is not a directory".format(infer_dir)
images = set()
assert os.path.isdir(infer_dir), \
"infer_dir {} is not a directory".format(infer_dir)
exts = ['jpg', 'jpeg', 'png', 'bmp']
exts += [ext.upper() for ext in exts]
for ext in exts:
images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
images = list(images)
images.sort()
assert len(images) > 0, "no image found in {}".format(infer_dir)
logger.info("Found {} inference images in total.".format(len(images)))
return images
def mot_predict(self,
video_file,
output_dir,
data_type='mot',
model_type='JDE',
save_images=False,
save_videos=True,
show_image=False,
det_results_dir='',
draw_threshold=0.5):
if not os.path.exists(output_dir): os.makedirs(output_dir)
result_root = os.path.join(output_dir, 'mot_results')
if not os.path.exists(result_root): os.makedirs(result_root)
assert data_type in ['mot', 'kitti'], \
"data_type should be 'mot' or 'kitti'"
assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
"model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
# run tracking
seq = video_file.split('/')[-1].split('.')[0]
save_dir = os.path.join(output_dir, 'mot_outputs',
seq) if save_images or save_videos else None
logger.info('Starting tracking {}'.format(video_file))
self.dataset.set_video(video_file)
dataloader = create('TestMOTReader')(self.dataset, 0)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
frame_rate = self.dataset.frame_rate
with paddle.no_grad():
if model_type in ['JDE', 'FairMOT']:
results, nf, ta, tc = self._eval_seq_jde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
draw_threshold=draw_threshold)
elif model_type in ['DeepSORT']:
results, nf, ta, tc = self._eval_seq_sde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
det_file=os.path.join(det_results_dir,
'{}.txt'.format(seq)),
draw_threshold=draw_threshold)
else:
raise ValueError(model_type)
self.write_mot_results(result_filename, results, data_type)
if save_videos:
output_video_path = os.path.join(save_dir, '..',
'{}_vis.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
save_dir, output_video_path)
os.system(cmd_str)
            logger.info('Saved video to {}'.format(output_video_path))
def write_mot_results(self, filename, results, data_type='mot'):
if data_type in ['mot', 'mcmot', 'lab']:
save_format = '{frame},{id},{x1},{y1},{w},{h},{score},-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, tscores, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(
frame=frame_id,
id=track_id,
x1=x1,
y1=y1,
x2=x2,
y2=y2,
w=w,
h=h,
score=score)
f.write(line)
        logger.info('MOT results saved to {}'.format(filename))
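    # Illustrative example (not from the original source): with data_type='mot', a written
    # result line such as
    #   1,3,102.5,220.0,45.0,110.0,0.87,-1,-1,-1
    # encodes frame id, track id, top-left x, top-left y, width, height and score, followed by
    # three unused trailing fields required by the MOT challenge format.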
def save_results(self, data, frame_id, online_ids, online_tlwhs,
online_scores, average_time, show_image, save_dir):
if show_image or save_dir is not None:
assert 'ori_image' in data
img0 = data['ori_image'].numpy()[0]
online_im = mot_vis.plot_tracking(
img0,
online_tlwhs,
online_ids,
online_scores,
frame_id=frame_id,
fps=1. / average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(
os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
online_im)
| avg_line_length: 39.295983 | max_line_length: 115 | alphanum_fraction: 0.531931 |
| hexsha: fdc3a6b8c185264c6ee3628efb66b70cb689255a | size: 5987 | ext: py | lang: Python |
| max_stars_repo: src/garage/torch/algos/trpo.py @ thanhkaist/garage (head 1d840df357282a675b8fce839bb0e5f72a8abba9, licenses ["MIT"]) | max_stars_count: 7 | stars events: 2022-02-01T03:02:24.000Z to 2022-02-10T12:54:05.000Z |
| max_issues_repo: src/garage/torch/algos/trpo.py @ thanhkaist/garage (head 1d840df357282a675b8fce839bb0e5f72a8abba9, licenses ["MIT"]) | max_issues_count: null | issues events: null |
| max_forks_repo: src/garage/torch/algos/trpo.py @ thanhkaist/garage (head 1d840df357282a675b8fce839bb0e5f72a8abba9, licenses ["MIT"]) | max_forks_count: 2 | forks events: 2022-02-03T03:33:25.000Z to 2022-02-10T12:54:07.000Z |
"""Trust Region Policy Optimization."""
import torch
from garage.torch.algos import VPG
from garage.torch.optimizers import ConjugateGradientOptimizer
from garage.torch.optimizers import OptimizerWrapper
class TRPO(VPG):
"""Trust Region Policy Optimization (TRPO).
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_path_length (int): Maximum length of a single rollout.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(self,
env_spec,
policy,
value_function,
policy_optimizer=None,
vf_optimizer=None,
max_path_length=100,
num_train_per_epoch=1,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy'):
if policy_optimizer is None:
policy_optimizer = OptimizerWrapper(
(ConjugateGradientOptimizer, dict(max_constraint_value=0.01)),
policy)
if vf_optimizer is None:
vf_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=2.5e-4)),
value_function,
max_optimization_epochs=10,
minibatch_size=64)
super().__init__(env_spec=env_spec,
policy=policy,
value_function=value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=max_path_length,
num_train_per_epoch=num_train_per_epoch,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method)
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
with torch.no_grad():
old_ll = self._old_policy(obs)[0].log_prob(actions)
new_ll = self.policy(obs)[0].log_prob(actions)
likelihood_ratio = (new_ll - old_ll).exp()
# Calculate surrogate
surrogate = likelihood_ratio * advantages
return surrogate
def _train_policy(self, obs, actions, rewards, advantages):
r"""Train the policy.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated mean scalar value of policy loss (float).
"""
self._policy_optimizer.zero_grad()
loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
loss.backward()
self._policy_optimizer.step(
f_loss=lambda: self._compute_loss_with_adv(obs, actions, rewards,
advantages),
f_constraint=lambda: self._compute_kl_constraint(obs))
return loss
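# Illustrative sketch only (not part of the original file): wiring TRPO together mirrors the
# defaults built in __init__ above. env_spec, policy and value_function are placeholders that
# would come from the usual garage experiment setup.
#
# policy_optimizer = OptimizerWrapper(
#     (ConjugateGradientOptimizer, dict(max_constraint_value=0.01)), policy)
# vf_optimizer = OptimizerWrapper(
#     (torch.optim.Adam, dict(lr=2.5e-4)), value_function,
#     max_optimization_epochs=10, minibatch_size=64)
# algo = TRPO(env_spec=env_spec, policy=policy, value_function=value_function,
#             policy_optimizer=policy_optimizer, vf_optimizer=vf_optimizer)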
| avg_line_length: 41.289655 | max_line_length: 78 | alphanum_fraction: 0.585435 |
| hexsha: 3da96b68d9ea1b01b1a8bf8753d2b9ad989f2819 | size: 5348 | ext: py | lang: Python |
| max_stars_repo: vcbot/helpers/utils.py @ PEARLGANG/PyTgCallsVC (head c34b06ef2641402c49d38067a33946a81f0fe2db, licenses ["MIT"]) | max_stars_count: null | stars events: null |
| max_issues_repo: vcbot/helpers/utils.py @ PEARLGANG/PyTgCallsVC (head c34b06ef2641402c49d38067a33946a81f0fe2db, licenses ["MIT"]) | max_issues_count: null | issues events: null |
| max_forks_repo: vcbot/helpers/utils.py @ PEARLGANG/PyTgCallsVC (head c34b06ef2641402c49d38067a33946a81f0fe2db, licenses ["MIT"]) | max_forks_count: null | forks events: null |
import os
import json
import asyncio
import subprocess
from vcbot.config import Var
from youtube_dl import YoutubeDL
from pyrogram.types import Message
def get_readable_time(seconds: int) -> str:
count = 0
readable_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", " days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
readable_time += time_list.pop() + ", "
time_list.reverse()
readable_time += ": ".join(time_list)
return readable_time
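# Example (illustrative): get_readable_time(3675) returns "1h: 1m: 15s".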
def raw_converter(source, vid, audio, log_file='ffmpeg.log'):
# log_file = open(log_file, 'w')
cmd = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", source, "-f", "s16le", "-ac", "1", "-ar", "48000", audio, "-f", "rawvideo", '-r', '20', '-pix_fmt', 'yuv420p', '-vf', 'scale=854:480', vid]
return subprocess.Popen(
cmd,
stdin=None,
stdout=None,
stderr=None,
cwd=None,
)
async def is_ytlive(url):
ydl_opts = {
'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': False
}
with YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(url, download=False)
return info_dict.get('is_live')
async def convert_to_stream(url: str):
cmd = ["youtube-dl", "-g", url]
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, _ = await proc.communicate()
if stdout:
return stdout.decode().strip()
async def transcode(file_path: str, delete=True):
audio_f = file_path.split(".")[0] + 'audio' + ".raw"
video_f = file_path.split(".")[0] + 'video' + ".raw"
if (os.path.isfile(audio_f) and (os.path.isfile(video_f))):
return audio_f, video_f
cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-y", "-i", file_path, "-f", "s16le", "-ac", "1", "-ar", "48000", audio_f, "-f", "rawvideo", '-r', '20', '-pix_fmt', 'yuv420p', '-vf', 'scale=854:480', video_f]
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
await proc.communicate()
if proc.returncode != 0:
print(f"Transcode failed for {file_path}")
return None
if delete:
try:
os.remove(file_path)
except BaseException:
...
return audio_f, video_f
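# Illustrative usage (not part of the original file): inside a coroutine,
#   raw_streams = await transcode("clip.mp4", delete=False)
# returns an (audio_path, video_path) pair pointing to the s16le mono 48 kHz raw audio and the
# 854x480 yuv420p raw video, or None if ffmpeg fails; "clip.mp4" is a placeholder path.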
async def get_video_info(filename):
proc = await asyncio.create_subprocess_exec('ffprobe', '-hide_banner', '-print_format', 'json', '-show_format', '-show_streams', filename, stdout=asyncio.subprocess.PIPE)
stdout, _ = await proc.communicate()
return json.loads(stdout)
async def get_backdrop_res(url):
info = await get_video_info(url)
width = None
height = None
for each in info['streams']:
try:
width = int(each['width'])
height = int(each['height'])
break
        except (KeyError, AttributeError):
continue
if height:
if not width:
width, height = get_resolution({'height': height})
return (width, height)
# got this from somewhere
def get_resolution(info_dict):
if {"width", "height"} <= info_dict.keys():
width = int(info_dict['width'])
height = int(info_dict['height'])
# https://support.google.com/youtube/answer/6375112
elif info_dict.get("height") == 1080:
width = 1920
height = 1080
elif info_dict.get("height") == 720:
width = 1280
height = 720
elif info_dict.get("height") == 480:
width = 854
height = 480
elif info_dict.get("height") == 360:
width = 640
height = 360
elif info_dict.get("height") == 240:
width = 426
height = 240
else:
return None
return (width, height)
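# Example (illustrative): get_resolution({'height': 480}) returns (854, 480); for heights not in
# the table above it returns None, and callers such as yt_download fall back to get_backdrop_res.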
def my_hook(d):
    # youtube_dl progress hook: called with a single status dict for each progress event
    if d['status'] == 'finished':
        file_tuple = os.path.split(os.path.abspath(d['filename']))
        print("Done downloading {}".format(file_tuple[1]))
    if d['status'] == 'downloading':
        print(d['filename'], d['_percent_str'], d['_eta_str'])
async def yt_download(ytlink):
ydl_opts = {
'format': f'bestvideo[height<={Var.HEIGHT},ext=mp4]+bestaudio[ext=m4a]',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': False,
        'progress_hooks': [my_hook]
}
with YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(ytlink, download=False)
res = get_resolution(info_dict)
if not res:
res = await get_backdrop_res(ytlink)
ydl.process_info(info_dict)
_file = ydl.prepare_filename(info_dict)
return _file, res
async def tg_download(m: Message):
path = await m.download()
return path
| avg_line_length: 33.848101 | max_line_length: 219 | alphanum_fraction: 0.596298 |
| hexsha: 1a3b0a0c21b7d34292ed8fd5f2d28e63873029cd | size: 1997 | ext: py | lang: Python |
| max_stars_repo: docs/conf.py @ RangelReale/kg_keycloak (head fd47dbd48b8df74dcba8161d2c811598345d9207, licenses ["MIT"]) | max_stars_count: null | stars events: null |
| max_issues_repo: docs/conf.py @ RangelReale/kg_keycloak (head fd47dbd48b8df74dcba8161d2c811598345d9207, licenses ["MIT"]) | max_issues_count: null | issues events: null |
| max_forks_repo: docs/conf.py @ RangelReale/kg_keycloak (head fd47dbd48b8df74dcba8161d2c811598345d9207, licenses ["MIT"]) | max_forks_count: null | forks events: null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'kg_keycloak'
copyright = '2020, Rangel Reale'
author = 'Rangel Reale'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx_rtd_theme', 'm2r2']
# source_suffix = '.rst'
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/style.css'
| avg_line_length: 35.035088 | max_line_length: 79 | alphanum_fraction: 0.665498 |
| hexsha: 17b08313d0324319f4b6ee8d91b2efece0669b68 | size: 1329 | ext: py | lang: Python |
| max_stars_repo: 1º Semestre/Aula 06 - 24_09/aula06_ok.py @ thaistlsantos/Python-FIT (head 2467a4af0083bdae6233a1a6a4af0e6310f3b9c3, licenses ["MIT"]) | max_stars_count: null | stars events: null |
| max_issues_repo: 1º Semestre/Aula 06 - 24_09/aula06_ok.py @ thaistlsantos/Python-FIT (head 2467a4af0083bdae6233a1a6a4af0e6310f3b9c3, licenses ["MIT"]) | max_issues_count: null | issues events: null |
| max_forks_repo: 1º Semestre/Aula 06 - 24_09/aula06_ok.py @ thaistlsantos/Python-FIT (head 2467a4af0083bdae6233a1a6a4af0e6310f3b9c3, licenses ["MIT"]) | max_forks_count: null | forks events: null |
def soma():
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro número: '))
s = n1 + n2
print(s)
return
print('fim da função! (primeira linha fora da função)')
print('chamando a função...')
soma()
print('chamando novamente a função...')
soma()
"""
Exercício:
Receber o valor de uma compra e a opção
relativa a forma de pagamento.
-------------------------------------------------------------
| Opção | Forma de pagamento |
- ------- ------------------------------------------------- -
| 1 | A vista em dinheiro - 10% de desconto |
| 2 | A vista no cartão - 5% de desconto |
| 3 | Em 2x no cartão - Sem desconto e sem juros |
| 4 | Em 5x no cartão - 10% de juros |
-------------------------------------------------------------
Exibir o valor total a pagar (com juros ou desconto),
o número de parcelas e o valor de cada parcela
"""
# 0 1 2 3
valor = float(input('Digite o valor da compra: '))
opcao = int(input('Digite a opção de pagamento (1 a 4): '))
if opcao == 1:
    # option 1: cash, single payment with a 10% discount
    total, parcelas = valor * 0.90, 1
else:
    # not option 1
    if opcao == 2:
        # option 2: card, single payment with a 5% discount
        total, parcelas = valor * 0.95, 1
    else:
        # not option 1 or 2
        if opcao == 3:
            # option 3: regular price in 2 installments
            total, parcelas = valor, 2
        else:
            # not option 1, 2 or 3:
            # charge 10% interest and split into 5 installments
            total, parcelas = valor * 1.10, 5
print('Total a pagar: {:.2f} em {}x de {:.2f}'.format(total, parcelas, total / parcelas))
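# Worked example (illustrative): for a purchase of 100.00 paid with option 4,
# the total is 100.00 * 1.10 = 110.00, split into 5 installments of 22.00 each.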
| avg_line_length: 27.122449 | max_line_length: 63 | alphanum_fraction: 0.455229 |
| hexsha: 0bed488a830e6fc584458a93db308cea51da26eb | size: 4033 | ext: py | lang: Python |
| max_stars_repo: carball/analysis/stats/stats.py @ unitedroguegg/carball (head 4767f2c5d195b7d5d60e6a5575415262803acef7, licenses ["Apache-2.0"]) | max_stars_count: 119 | stars events: 2018-09-14T02:14:19.000Z to 2022-03-06T05:06:54.000Z |
| max_issues_repo: carball/analysis/stats/stats.py @ unitedroguegg/carball (head 4767f2c5d195b7d5d60e6a5575415262803acef7, licenses ["Apache-2.0"]) | max_issues_count: 207 | issues events: 2018-09-06T18:53:06.000Z to 2022-02-12T22:39:36.000Z |
| max_forks_repo: carball/analysis/stats/stats.py @ unitedroguegg/carball (head 4767f2c5d195b7d5d60e6a5575415262803acef7, licenses ["Apache-2.0"]) | max_forks_count: 44 | forks events: 2018-09-10T16:54:13.000Z to 2022-02-19T03:07:50.000Z |
import logging
from typing import Dict
import pandas as pd
from ...generated.api import game_pb2
from ...generated.api.player_pb2 import Player
from ...generated.api.stats.events_pb2 import Hit
from ...generated.api.stats.player_stats_pb2 import PlayerStats
from ...generated.api.stats.team_stats_pb2 import TeamStats
from ...json_parser.game import Game
class BaseStat:
def __init__(self):
self.logger = logging.getLogger(type(self).__name__)
def calculate_stat(self, proto_stat, game: Game, proto_game: game_pb2.Game, player_map: Dict[str, Player],
data_frame: pd.DataFrame):
"""
        Calculates stats that apply to the general game or to players + teams at the same time.
:param proto_stat: This is protobuf object for general game stats
:param game: The raw data that has been created from python.
:param proto_game: A protobuf that contains some parsed stats + all metadata for the game.
:param player_map: A map of playerId to the protobuf Player object
        :param data_frame: The raw frames of the replay; this is the same object as `game.frames`
"""
raise NotImplementedError()
def calculate_player_stat(self, player_stat_map: Dict[str, PlayerStats], game: Game, proto_game: game_pb2.Game,
player_map: Dict[str, Player], data_frame: pd.DataFrame):
"""
Calculates stats that only apply to players.
:param player_stat_map: A map of playerId to the specific proto object for stats for that player.
:param game: The raw data that has been created from python.
:param proto_game: A protobuf that contains some parsed stats + all metadata for the game.
:param player_map: A map of playerId to the protobuf Player object
        :param data_frame: The raw frames of the replay; this is the same object as `game.frames`
"""
raise NotImplementedError()
def calculate_team_stat(self, team_stat_list: Dict[int, TeamStats], game: Game, proto_game: game_pb2.Game,
player_map: Dict[str, Player], data_frame: pd.DataFrame):
"""
        Calculate stats that only apply to teams
        :param team_stat_list: A map of team id to the specific proto object for stats for that team
:param game: The raw data that has been created from python.
:param proto_game: A protobuf that contains some parsed stats + all metadata for the game.
:param player_map: A map of playerId to the protobuf Player object
        :param data_frame: The raw frames of the replay; this is the same object as `game.frames`
"""
raise NotImplementedError()
class HitStat:
def initialize_hit_stat(self, game: Game, player_map: Dict[str, Player], data_frame: pd.DataFrame):
"""
Called only once at the beginning of stat creation.
:param game: The raw data that has been created from python.
:param player_map: A map of playerId to the protobuf Player object
        :param data_frame: The raw frames of the replay; this is the same object as `game.frames`
"""
raise NotImplementedError()
def calculate_next_hit_stat(self, game: Game, proto_game: game_pb2.Game, saltie_hit: Hit, next_saltie_hit: Hit,
player_map: Dict[str, Player], hit_index: int):
"""
Calculate stats that use only the current hit + the next hit.
:param game: The raw data that has been created from python.
:param proto_game: A protobuf that contains some parsed stats + all metadata for the game.
:param saltie_hit: The current hit we are looking at, this is a protobuf object.
        :param next_saltie_hit: The hit that occurred after the current one.
:param player_map: A map of playerId to the protobuf Player object
:param hit_index: The index in the list of protobuf hits where the current hit is listed.
"""
raise NotImplementedError()
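# Illustrative sketch only (not part of the original file): a minimal BaseStat subclass that
# records a hypothetical per-player frame count. The `total_frames` field name is an assumption
# made for the example, not a claim about the real protobuf schema.
#
# class FrameCountStat(BaseStat):
#     def calculate_player_stat(self, player_stat_map, game, proto_game, player_map, data_frame):
#         for player_id in player_map:
#             player_stat_map[player_id].total_frames = len(data_frame)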
| avg_line_length: 51.705128 | max_line_length: 115 | alphanum_fraction: 0.68733 |
| hexsha: 15fa6dbf3ad8f4fcbf61e352f1ce50305dc2331c | size: 131 | ext: py | lang: Python |
| max_stars_repo: posthog/queries/abstract_test/test_compare.py @ avoajaugochukwu/posthog (head 7e7fd42b0542ebc4734aedb926df11d462e3dd4f, licenses ["MIT"]) | max_stars_count: 7409 | stars events: 2020-02-09T23:18:10.000Z to 2022-03-31T22:36:25.000Z |
| max_issues_repo: posthog/queries/abstract_test/test_compare.py @ avoajaugochukwu/posthog (head 7e7fd42b0542ebc4734aedb926df11d462e3dd4f, licenses ["MIT"]) | max_issues_count: 5709 | issues events: 2020-02-09T23:26:13.000Z to 2022-03-31T20:20:01.000Z |
| max_forks_repo: posthog/queries/abstract_test/test_compare.py @ avoajaugochukwu/posthog (head 7e7fd42b0542ebc4734aedb926df11d462e3dd4f, licenses ["MIT"]) | max_forks_count: 647 | forks events: 2020-02-13T17:50:55.000Z to 2022-03-31T11:24:19.000Z |
from abc import ABC, abstractmethod
class AbstractCompareTest(ABC):
@abstractmethod
def test_compare(self):
pass
| avg_line_length: 16.375 | max_line_length: 35 | alphanum_fraction: 0.717557 |
| hexsha: 80faf7716d3064c09a81e060b1b10a5f4f8a2620 | size: 126 | ext: py | lang: Python |
| max_stars_repo: project/server/main/tasks.py @ rochacbruno/flask-redis-queue (head 1615d9fe1ad6658343d58df22e6aca63a3ffbbf3, licenses ["MIT"]) | max_stars_count: 1 | stars events: 2021-08-12T18:27:28.000Z to 2021-08-12T18:27:28.000Z |
| max_issues_repo: project/server/main/tasks.py @ rochacbruno/flask-redis-queue (head 1615d9fe1ad6658343d58df22e6aca63a3ffbbf3, licenses ["MIT"]) | max_issues_count: null | issues events: null |
| max_forks_repo: project/server/main/tasks.py @ rochacbruno/flask-redis-queue (head 1615d9fe1ad6658343d58df22e6aca63a3ffbbf3, licenses ["MIT"]) | max_forks_count: null | forks events: null |
# project/server/main/tasks.py
import time
def create_task(task_type):
time.sleep(int(task_type) * 10)
return True
| avg_line_length: 14 | max_line_length: 35 | alphanum_fraction: 0.714286 |
| hexsha: 8fdfc65a7e5fb22d50c0c144ef0bf013266acee7 | size: 814 | ext: py | lang: Python |
| max_stars_repo: airdialogue/codalab/fake_model.py @ HMJiangGatech/airdialogue (head fca56769fefec2f1ecccb309cf666e368166473f, licenses ["Apache-2.0"]) | max_stars_count: 33 | stars events: 2019-11-18T06:22:37.000Z to 2022-03-07T15:04:16.000Z |
| max_issues_repo: airdialogue/codalab/fake_model.py @ HMJiangGatech/airdialogue (head fca56769fefec2f1ecccb309cf666e368166473f, licenses ["Apache-2.0"]) | max_issues_count: 4 | issues events: 2020-05-22T04:11:00.000Z to 2020-07-01T22:51:31.000Z |
| max_forks_repo: airdialogue/codalab/fake_model.py @ HMJiangGatech/airdialogue (head fca56769fefec2f1ecccb309cf666e368166473f, licenses ["Apache-2.0"]) | max_forks_count: 9 | forks events: 2019-12-02T22:57:17.000Z to 2021-10-16T09:28:42.000Z |
import json
import random
import sys
if __name__ == "__main__":
output_txt = open(sys.argv[1], "w+")
data = open(sys.argv[2], "r")
if len(sys.argv) > 3:
kb = open(sys.argv[3], "r")
for s in data:
# Response in format of "utterance | name | flight | action"
response = "Hi I am an agent. How can I help today? | | | "
obj = json.loads(s)
d = {"dialogue": obj["dialogue"], "action": obj["dialogue"]}
if "intent" in obj.keys():
d["intent"] = obj["intent"]
response = "Hi I am a client. I want to book a flight."
else:
# Agent
if random.randint(1, 20) < 10 and len(d["dialogue"]) > 5:
response = ("I have reserved for James Smith. | James Smith | 12345 | "
"book")
output_txt.write(response + "\n")
output_txt.close()
| avg_line_length: 29.071429 | max_line_length: 79 | alphanum_fraction: 0.570025 |
| hexsha: bc659d1c9cb27a0ae18edb6a44af570dda1c5a67 | size: 1373 | ext: py | lang: Python |
| max_stars_repo: tests/v4/conftest.py @ maxalbert/tohu (head 3adf0c58b13ef1e1d716d7d613484d2adc58fb60, licenses ["MIT"]) | max_stars_count: 1 | stars events: 2019-03-07T19:58:45.000Z to 2019-03-07T19:58:45.000Z |
| max_issues_repo: tests/v4/conftest.py @ maxalbert/tohu (head 3adf0c58b13ef1e1d716d7d613484d2adc58fb60, licenses ["MIT"]) | max_issues_count: 9 | issues events: 2017-10-04T15:08:53.000Z to 2021-02-02T21:51:41.000Z |
| max_forks_repo: tests/v4/conftest.py @ maxalbert/tohu (head 3adf0c58b13ef1e1d716d7d613484d2adc58fb60, licenses ["MIT"]) | max_forks_count: null | forks events: null |
import os
from .context import tohu
from tohu.v4.primitive_generators import *
from tohu.v4.derived_generators import *
from tohu.v4.dispatch_generators import *
__all__ = ['EXEMPLAR_GENERATORS', 'EXEMPLAR_PRIMITIVE_GENERATORS', 'EXEMPLAR_DERIVED_GENERATORS']
def add(x, y):
return x + y
here = os.path.abspath(os.path.dirname(__file__))
geojson_filename = os.path.join(here, '..', '..', 'tohu', 'data', 'admin_0_countries.geojson')
EXEMPLAR_PRIMITIVE_GENERATORS = [
Boolean(p=0.3),
CharString(length=12, charset='<alphanumeric>'),
Constant("quux"),
DigitString(length=20),
FakerGenerator(method="name"),
Float(12.34, 56.78),
#GeoJSONGeolocation(geojson_filename, include_attributes=['name', 'pop_est']),
HashDigest(length=6),
Integer(100, 200),
IterateOver('abcdefghijklmnopqrstuvwxyz'),
SelectOne('abcdefghijklmnopqrstuvwxyz'),
SelectOne('abcde', p=[0.1, 0.05, 0.7, 0.03, 0.12]),
Sequential(prefix='Foobar_', digits=3),
Timestamp(date='2018-01-01'),
]
EXEMPLAR_DERIVED_GENERATORS = [
Apply(add, Integer(100, 200), Integer(300, 400)),
Apply(add, Apply(add, Integer(100, 200), Integer(300, 400)), Apply(add, Integer(500, 600), Integer(700, 800))),
]
EXEMPLAR_CUSTOM_GENERATORS = []
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
| avg_line_length: 33.487805 | max_line_length: 115 | alphanum_fraction: 0.716679 |
| hexsha: 1f462c518807b12340774f703fe0af155ba4618d | size: 18587 | ext: py | lang: Python |
| max_stars_repo: google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @ dizcology/python-aiplatform (head 1a135775966c8a2303ded529eba514dcf9db7205, licenses ["Apache-2.0"]) | max_stars_count: 2 | stars events: 2021-10-02T02:25:44.000Z to 2021-11-17T10:35:01.000Z |
| max_issues_repo: google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @ pompipo/python-aiplatform (head 3612b05c62dfb46822cd2c1798fd47349dba33bc, licenses ["Apache-2.0"]) | max_issues_count: 1 | issues events: 2021-03-02T18:25:00.000Z to 2021-03-02T18:25:00.000Z |
| max_forks_repo: google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @ pompipo/python-aiplatform (head 3612b05c62dfb46822cd2c1798fd47349dba33bc, licenses ["Apache-2.0"]) | max_forks_count: null | forks events: null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config,
)
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",},
)
class AutoMlTables(proto.Message):
r"""A TrainingJob that trains and uploads an AutoML Tables Model.
Attributes:
inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs):
The input parameters of this TrainingJob.
metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata):
The metadata information.
"""
inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",)
metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",)
class AutoMlTablesInputs(proto.Message):
r"""
Attributes:
optimization_objective_recall_value (float):
Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1,
inclusive.
optimization_objective_precision_value (float):
Required when optimization_objective is
"maximize-recall-at-precision". Must be between 0 and 1,
inclusive.
prediction_type (str):
The type of prediction the Model is to
produce. "classification" - Predict one out of
            multiple target values for each row.
"regression" - Predict a value based on its
relation to other values. This
type is available only to columns that contain
semantically numeric values, i.e. integers or
floating point number, even if
stored as e.g. strings.
target_column (str):
The column name of the target column that the
model is to predict.
transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]):
Each transformation will apply transform
function to given input column. And the result
will be used for training. When creating
transformation for BigQuery Struct column, the
column should be flattened using "." as the
delimiter.
optimization_objective (str):
Objective function the model is optimizing
towards. The training process creates a model
that maximizes/minimizes the value of the
objective function over the validation set.
The supported optimization objectives depend on
the prediction type. If the field is not set, a
default objective function is used.
classification (binary):
"maximize-au-roc" (default) - Maximize the
area under the receiver
operating characteristic (ROC) curve.
"minimize-log-loss" - Minimize log loss.
"maximize-au-prc" - Maximize the area under
the precision-recall curve. "maximize-
precision-at-recall" - Maximize precision for a
specified
recall value. "maximize-recall-at-precision" -
Maximize recall for a specified
precision value.
classification (multi-class):
"minimize-log-loss" (default) - Minimize log
loss.
regression:
"minimize-rmse" (default) - Minimize root-
mean-squared error (RMSE). "minimize-mae" -
Minimize mean-absolute error (MAE). "minimize-
rmsle" - Minimize root-mean-squared log error
(RMSLE).
train_budget_milli_node_hours (int):
Required. The train budget of creating this
model, expressed in milli node hours i.e. 1,000
value in this field means 1 node hour.
The training cost of the model will not exceed
this budget. The final cost will be attempted to
be close to the budget, though may end up being
(even) noticeably smaller - at the backend's
discretion. This especially may happen when
further model training ceases to provide any
improvements.
If the budget is set to a value known to be
insufficient to train a model for the given
dataset, the training won't be attempted and
will error.
The train budget must be between 1,000 and
72,000 milli node hours, inclusive.
disable_early_stopping (bool):
Use the entire training budget. This disables
the early stopping feature. By default, the
early stopping feature is enabled, which means
that AutoML Tables might stop training before
the entire training budget has been used.
weight_column_name (str):
Column name that should be used as the weight
column. Higher values in this column give more
importance to the row during model training. The
column must have numeric values between 0 and
10000 inclusively; 0 means the row is ignored
for training. If weight column field is not set,
then all rows are assumed to have equal weight
of 1.
export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig):
Configuration for exporting test set
predictions to a BigQuery table. If this
configuration is absent, then the export is not
performed.
additional_experiments (Sequence[str]):
Additional experiment flags for the Tables
training pipeline.
"""
class Transformation(proto.Message):
r"""
Attributes:
auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation):
repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
"""
class AutoTransformation(proto.Message):
r"""Training pipeline will infer the proper transformation based
on the statistic of dataset.
Attributes:
column_name (str):
"""
column_name = proto.Field(proto.STRING, number=1,)
class NumericTransformation(proto.Message):
r"""Training pipeline will perform following transformation functions.
- The value converted to float32.
- The z_score of the value.
- log(value+1) when the value is greater than or equal to 0.
Otherwise, this transformation is not applied and the value is
considered a missing value.
- z_score of log(value+1) when the value is greater than or equal
to 0. Otherwise, this transformation is not applied and the value
is considered a missing value.
- A boolean value that indicates whether the value is valid.
Attributes:
column_name (str):
invalid_values_allowed (bool):
                If invalid values are allowed, the training
                pipeline will create a boolean feature that
                indicates whether the value is valid. Otherwise,
                the training pipeline will discard the input row
                from the training data.
"""
column_name = proto.Field(proto.STRING, number=1,)
invalid_values_allowed = proto.Field(proto.BOOL, number=2,)
class CategoricalTransformation(proto.Message):
r"""Training pipeline will perform following transformation functions.
- The categorical string as is--no change to case, punctuation,
spelling, tense, and so on.
- Convert the category name to a dictionary lookup index and
generate an embedding for each index.
- Categories that appear less than 5 times in the training dataset
are treated as the "unknown" category. The "unknown" category
gets its own special lookup index and resulting embedding.
Attributes:
column_name (str):
"""
column_name = proto.Field(proto.STRING, number=1,)
class TimestampTransformation(proto.Message):
r"""Training pipeline will perform following transformation functions.
- Apply the transformation functions for Numerical columns.
- Determine the year, month, day,and weekday. Treat each value from
the
- timestamp as a Categorical column.
- Invalid numerical values (for example, values that fall outside
of a typical timestamp range, or are extreme values) receive no
special treatment and are not removed.
Attributes:
column_name (str):
time_format (str):
The format in which that time field is expressed. The
time_format must either be one of:
- ``unix-seconds``
- ``unix-milliseconds``
- ``unix-microseconds``
- ``unix-nanoseconds`` (for respectively number of seconds,
milliseconds, microseconds and nanoseconds since start of
the Unix epoch); or be written in ``strftime`` syntax. If
time_format is not set, then the default format is RFC
3339 ``date-time`` format, where ``time-offset`` =
``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
invalid_values_allowed (bool):
                If invalid values are allowed, the training
                pipeline will create a boolean feature that
                indicates whether the value is valid. Otherwise,
                the training pipeline will discard the input row
                from the training data.
"""
column_name = proto.Field(proto.STRING, number=1,)
time_format = proto.Field(proto.STRING, number=2,)
invalid_values_allowed = proto.Field(proto.BOOL, number=3,)
class TextTransformation(proto.Message):
r"""Training pipeline will perform following transformation functions.
- The text as is--no change to case, punctuation, spelling, tense,
and so on.
- Tokenize text to words. Convert each words to a dictionary lookup
index and generate an embedding for each index. Combine the
embedding of all elements into a single embedding using the mean.
- Tokenization is based on unicode script boundaries.
- Missing values get their own lookup index and resulting
embedding.
- Stop-words receive no special treatment and are not removed.
Attributes:
column_name (str):
"""
column_name = proto.Field(proto.STRING, number=1,)
class NumericArrayTransformation(proto.Message):
r"""Treats the column as numerical array and performs following
transformation functions.
- All transformations for Numerical types applied to the average of
the all elements.
- The average of empty arrays is treated as zero.
Attributes:
column_name (str):
invalid_values_allowed (bool):
                If invalid values are allowed, the training
                pipeline will create a boolean feature that
                indicates whether the value is valid. Otherwise,
                the training pipeline will discard the input row
                from the training data.
"""
column_name = proto.Field(proto.STRING, number=1,)
invalid_values_allowed = proto.Field(proto.BOOL, number=2,)
class CategoricalArrayTransformation(proto.Message):
r"""Treats the column as categorical array and performs following
transformation functions.
- For each element in the array, convert the category name to a
dictionary lookup index and generate an embedding for each index.
Combine the embedding of all elements into a single embedding
using the mean.
- Empty arrays treated as an embedding of zeroes.
Attributes:
column_name (str):
"""
column_name = proto.Field(proto.STRING, number=1,)
class TextArrayTransformation(proto.Message):
r"""Treats the column as text array and performs following
transformation functions.
- Concatenate all text values in the array into a single text value
using a space (" ") as a delimiter, and then treat the result as
a single text value. Apply the transformations for Text columns.
- Empty arrays treated as an empty text.
Attributes:
column_name (str):
"""
column_name = proto.Field(proto.STRING, number=1,)
auto = proto.Field(
proto.MESSAGE,
number=1,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.AutoTransformation",
)
numeric = proto.Field(
proto.MESSAGE,
number=2,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.NumericTransformation",
)
categorical = proto.Field(
proto.MESSAGE,
number=3,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.CategoricalTransformation",
)
timestamp = proto.Field(
proto.MESSAGE,
number=4,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.TimestampTransformation",
)
text = proto.Field(
proto.MESSAGE,
number=5,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.TextTransformation",
)
repeated_numeric = proto.Field(
proto.MESSAGE,
number=6,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.NumericArrayTransformation",
)
repeated_categorical = proto.Field(
proto.MESSAGE,
number=7,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation",
)
repeated_text = proto.Field(
proto.MESSAGE,
number=8,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.TextArrayTransformation",
)
optimization_objective_recall_value = proto.Field(
proto.FLOAT, number=5, oneof="additional_optimization_objective_config",
)
optimization_objective_precision_value = proto.Field(
proto.FLOAT, number=6, oneof="additional_optimization_objective_config",
)
prediction_type = proto.Field(proto.STRING, number=1,)
target_column = proto.Field(proto.STRING, number=2,)
transformations = proto.RepeatedField(
proto.MESSAGE, number=3, message=Transformation,
)
optimization_objective = proto.Field(proto.STRING, number=4,)
train_budget_milli_node_hours = proto.Field(proto.INT64, number=7,)
disable_early_stopping = proto.Field(proto.BOOL, number=8,)
weight_column_name = proto.Field(proto.STRING, number=9,)
export_evaluated_data_items_config = proto.Field(
proto.MESSAGE,
number=10,
message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
)
additional_experiments = proto.RepeatedField(proto.STRING, number=11,)
class AutoMlTablesMetadata(proto.Message):
r"""Model metadata specific to AutoML Tables.
Attributes:
train_cost_milli_node_hours (int):
Output only. The actual training cost of the
model, expressed in milli node hours, i.e. 1,000
value in this field means 1 node hour.
Guaranteed to not exceed the train budget.
"""
train_cost_milli_node_hours = proto.Field(proto.INT64, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
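# Illustrative sketch only (not part of the generated module): proto-plus messages accept
# keyword construction, so a minimal set of inputs might be assembled as below. The column
# names are placeholders.
#
# inputs = AutoMlTablesInputs(
#     prediction_type="classification",
#     target_column="churned",
#     optimization_objective="maximize-au-roc",
#     train_budget_milli_node_hours=1000,
#     transformations=[
#         AutoMlTablesInputs.Transformation(
#             auto=AutoMlTablesInputs.Transformation.AutoTransformation(column_name="age"),
#         ),
#     ],
# )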
| avg_line_length: 43.837264 | max_line_length: 166 | alphanum_fraction: 0.633938 |
| hexsha: 757331247cc6338d8ff1d498c1360be1f1e87ed2 | size: 8236 | ext: py | lang: Python |
| max_stars_repo: allennlp/data/dataset_readers/reading_comprehension/multiqa+.py @ alontalmor/allennlp (head 3beb3ffff3ef45311c148301e91562b2000dff3b, licenses ["Apache-2.0"]) | max_stars_count: null | stars events: null |
| max_issues_repo: allennlp/data/dataset_readers/reading_comprehension/multiqa+.py @ alontalmor/allennlp (head 3beb3ffff3ef45311c148301e91562b2000dff3b, licenses ["Apache-2.0"]) | max_issues_count: null | issues events: null |
| max_forks_repo: allennlp/data/dataset_readers/reading_comprehension/multiqa+.py @ alontalmor/allennlp (head 3beb3ffff3ef45311c148301e91562b2000dff3b, licenses ["Apache-2.0"]) | max_forks_count: null | forks events: null |
import json
import logging
from typing import Any, Dict, List, Tuple
import zipfile,re, copy, random
import numpy as np
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.reading_comprehension import util
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# ALON - for line profiler
try:
profile
except NameError:
profile = lambda x: x
@DatasetReader.register("multiqa+")
class MultiQAReader(DatasetReader):
"""
    Reads a JSON-formatted Question Answering in Context (QuAC) data file
and returns a ``Dataset`` where the ``Instances`` have four fields: ``question``, a ``ListField``,
``passage``, another ``TextField``, and ``span_start`` and ``span_end``, both ``ListField`` composed of
IndexFields`` into the ``passage`` ``TextField``.
Two ``ListField``, composed of ``LabelField``, ``yesno_list`` and ``followup_list`` is added.
We also add a
``MetadataField`` that stores the instance's ID, the original passage text, gold answer strings,
and token offsets into the original passage, accessible as ``metadata['id']``,
``metadata['original_passage']``, ``metadata['answer_text_lists'] and ``metadata['token_offsets']``.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
Default is ```WordTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
"""
def __init__(self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
sample_size: int = -1) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._sample_size = sample_size
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
def build_instances(self, header, instances):
# bucketing by QuestionID
instance_list = instances
instance_list = sorted(instance_list, key=lambda x: x['metadata']['question_id'])
intances_question_id = [instance['metadata']['question_id'] for instance in instance_list]
split_inds = [0] + list(np.cumsum(np.unique(intances_question_id, return_counts=True)[1]))
per_question_instances = [instance_list[split_inds[ind]:split_inds[ind + 1]] for ind in
range(len(split_inds) - 1)]
# sorting
sorting_keys = ['question_tokens', 'tokens']
instances_with_lengths = []
for instance in per_question_instances:
padding_lengths = {key: len(instance[0][key]) for key in sorting_keys}
instance_with_lengths = ([padding_lengths[field_name] for field_name in sorting_keys], instance)
instances_with_lengths.append(instance_with_lengths)
instances_with_lengths.sort(key=lambda x: x[0])
per_question_instances = [instance_with_lengths[-1] for instance_with_lengths in instances_with_lengths]
        # selecting instances to add
filtered_instances = []
for question_instances in per_question_instances:
if header['split_type'] == 'dev':
instances_to_add = question_instances
else:
# choose at most 2 instances from the same question:
if len(question_instances) > 2:
# This part is inspired by Clark and Gardner, 17 - oversample the highest ranking documents.
                    # In their work they use only instances with answers, so we will find the highest
                    # ranking instance with an answer (this also ensures we have at least one answer in the chosen instances)
inst_with_answers = [inst for inst in question_instances if inst['answers'] != []]
instances_to_add = random.sample(inst_with_answers[0:2], 1)
# we assume each question will be visited once in an epoch
question_instances.remove(instances_to_add[0])
instances_to_add += random.sample(question_instances, 1)
else:
instances_to_add = question_instances
# Require at least one answer:
if not any(inst['answers'] != [] for inst in instances_to_add):
continue
filtered_instances += instances_to_add
#logger.info("multiqa+: yielding %d instances ", len(filtered_instances))
for inst_num, inst in enumerate(filtered_instances):
# if inst_num % 99 == 0:
# logger.info("yeilding inst_num %d",inst_num)
tokenized_paragraph = [Token(text=t[0], idx=t[1]) for t in inst['tokens']]
question_tokens = [Token(text=t[0], idx=t[1]) for t in inst['question_tokens']]
instance = util.make_reading_comprehension_instance_multiqa(question_tokens,
tokenized_paragraph,
self._token_indexers,
inst['text'],
inst['answers'],
inst['metadata'],
header)
yield instance
@profile
@overrides
def _read(self, file_path: str):
logger.info("Reading the dataset")
# supporting multi dataset training:
instances = []
total_questions_yielded = 0
for ind, single_file_path in enumerate(file_path.split(',')):
# if `file_path` is a URL, redirect to the cache
logger.info("Reading file at %s", single_file_path)
if single_file_path.find('jsonl') > 0:
single_file_path_cached = cached_path(single_file_path)
with zipfile.ZipFile(single_file_path_cached, 'r') as myzip:
with myzip.open(myzip.namelist()[0]) as myfile:
header = json.loads(myfile.readline())['header']
for line, example in enumerate(myfile):
# header
instances.append(json.loads(example))
if len(instances) > 2 and instances[-1]['metadata']['question_id'] != \
instances[-2]['metadata']['question_id']:
total_questions_yielded += 1
# supporting sample size
if self._sample_size > -1 and total_questions_yielded > self._sample_size:
break
# making sure not to take all instances of the same question
if len(instances)>10000 and instances[-1]['metadata']['question_id'] \
!= instances[-2]['metadata']['question_id']:
remainder = instances[-1]
instances = instances[:-1]
for instance in self.build_instances(header, instances):
yield instance
instances = [remainder]
# yielding the remainder
for instance in self.build_instances(header, instances):
yield instance
| avg_line_length: 49.915152 | max_line_length: 125 | alphanum_fraction: 0.573458 |
| hexsha: f6ab1cbff369241f26dd0ea4cec7c4365f2ef746 | size: 472 | ext: py | lang: Python |
| max_stars_repo: GeneratorInterface/GenFilters/python/PythiaFilterGammaJetWithBg_cfi.py @ ckamtsikis/cmssw (head ea19fe642bb7537cbf58451dcf73aa5fd1b66250, licenses ["Apache-2.0"]) | max_stars_count: 852 | stars events: 2015-01-11T21:03:51.000Z to 2022-03-25T21:14:00.000Z |
| max_issues_repo: GeneratorInterface/GenFilters/python/PythiaFilterGammaJetWithBg_cfi.py @ ckamtsikis/cmssw (head ea19fe642bb7537cbf58451dcf73aa5fd1b66250, licenses ["Apache-2.0"]) | max_issues_count: 30371 | issues events: 2015-01-02T00:14:40.000Z to 2022-03-31T23:26:05.000Z |
| max_forks_repo: GeneratorInterface/GenFilters/python/PythiaFilterGammaJetWithBg_cfi.py @ ckamtsikis/cmssw (head ea19fe642bb7537cbf58451dcf73aa5fd1b66250, licenses ["Apache-2.0"]) | max_forks_count: 3240 | forks events: 2015-01-02T05:53:18.000Z to 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
gj_filter = cms.EDFilter("PythiaFilterGammaJetWithBg",
MaxEvents = cms.untracked.int32(2),
MaxPhotonEta = cms.untracked.double(2.8),
MaxPhotonPt = cms.untracked.double(22.0),
MinPhotonEtaForwardJet = cms.untracked.double(1.3),
MinDeltaPhi = cms.untracked.double(170.0),
MinPhotonPt = cms.untracked.double(18.0),
MaxDeltaEta = cms.untracked.double(1.3),
PhotonSeedPt = cms.untracked.double(5.0)
)
| avg_line_length: 31.466667 | max_line_length: 55 | alphanum_fraction: 0.724576 |
| hexsha: 59c109ad84fc4cffab103846629ba75a62a49f62 | size: 11205 | ext: py | lang: Python |
| max_stars_repo: env/Lib/site-packages/jupyter_server/services/contents/handlers.py @ JoaoPROFECIA/Ola-Mundo (head 071da385f93e9b652864c72226f3ef5afc15ba37, licenses ["MIT"]) | max_stars_count: null | stars events: null |
| max_issues_repo: env/Lib/site-packages/jupyter_server/services/contents/handlers.py @ JoaoPROFECIA/Ola-Mundo (head 071da385f93e9b652864c72226f3ef5afc15ba37, licenses ["MIT"]) | max_issues_count: null | issues events: null |
| max_forks_repo: env/Lib/site-packages/jupyter_server/services/contents/handlers.py @ JoaoPROFECIA/Ola-Mundo (head 071da385f93e9b652864c72226f3ef5afc15ba37, licenses ["MIT"]) | max_forks_count: null | forks events: null |
"""Tornado handlers for the contents web service.
Preliminary documentation at https://github.com/ipython/ipython/wiki/IPEP-27%3A-Contents-Service
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
try:
from jupyter_client.jsonutil import json_default
except ImportError:
from jupyter_client.jsonutil import date_default as json_default
from tornado import web
from jupyter_server.auth import authorized
from jupyter_server.base.handlers import APIHandler, JupyterHandler, path_regex
from jupyter_server.utils import ensure_async, url_escape, url_path_join
AUTH_RESOURCE = "contents"
def validate_model(model, expect_content):
"""
Validate a model returned by a ContentsManager method.
If expect_content is True, then we expect non-null entries for 'content'
and 'format'.
"""
required_keys = {
"name",
"path",
"type",
"writable",
"created",
"last_modified",
"mimetype",
"content",
"format",
}
missing = required_keys - set(model.keys())
if missing:
raise web.HTTPError(
500,
f"Missing Model Keys: {missing}",
)
maybe_none_keys = ["content", "format"]
if expect_content:
errors = [key for key in maybe_none_keys if model[key] is None]
if errors:
raise web.HTTPError(
500,
f"Keys unexpectedly None: {errors}",
)
else:
errors = {key: model[key] for key in maybe_none_keys if model[key] is not None}
if errors:
raise web.HTTPError(
500,
f"Keys unexpectedly not None: {errors}",
)
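# Example (illustrative): a model fetched with content=0 is validated with expect_content=False,
# so 'content' and 'format' may both be None; a directory listing fetched with content=1 must
# carry a non-null 'content' list and a 'format' of "json".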
class ContentsAPIHandler(APIHandler):
auth_resource = AUTH_RESOURCE
class ContentsHandler(ContentsAPIHandler):
def location_url(self, path):
"""Return the full URL location of a file.
Parameters
----------
path : unicode
The API path of the file, such as "foo/bar.txt".
"""
return url_path_join(self.base_url, "api", "contents", url_escape(path))
def _finish_model(self, model, location=True):
"""Finish a JSON request with a model, setting relevant headers, etc."""
if location:
location = self.location_url(model["path"])
self.set_header("Location", location)
self.set_header("Last-Modified", model["last_modified"])
self.set_header("Content-Type", "application/json")
self.finish(json.dumps(model, default=json_default))
@web.authenticated
@authorized
async def get(self, path=""):
"""Return a model for a file or directory.
A directory model contains a list of models (without content)
of the files and directories it contains.
"""
path = path or ""
type = self.get_query_argument("type", default=None)
if type not in {None, "directory", "file", "notebook"}:
raise web.HTTPError(400, "Type %r is invalid" % type)
format = self.get_query_argument("format", default=None)
if format not in {None, "text", "base64"}:
raise web.HTTPError(400, "Format %r is invalid" % format)
content = self.get_query_argument("content", default="1")
if content not in {"0", "1"}:
raise web.HTTPError(400, "Content %r is invalid" % content)
content = int(content)
model = await ensure_async(
self.contents_manager.get(
path=path,
type=type,
format=format,
content=content,
)
)
validate_model(model, expect_content=content)
self._finish_model(model, location=False)
@web.authenticated
@authorized
async def patch(self, path=""):
"""PATCH renames a file or directory without re-uploading content."""
cm = self.contents_manager
model = self.get_json_body()
if model is None:
raise web.HTTPError(400, "JSON body missing")
model = await ensure_async(cm.update(model, path))
validate_model(model, expect_content=False)
self._finish_model(model)
async def _copy(self, copy_from, copy_to=None):
"""Copy a file, optionally specifying a target directory."""
self.log.info(
"Copying {copy_from} to {copy_to}".format(
copy_from=copy_from,
copy_to=copy_to or "",
)
)
model = await ensure_async(self.contents_manager.copy(copy_from, copy_to))
self.set_status(201)
validate_model(model, expect_content=False)
self._finish_model(model)
async def _upload(self, model, path):
"""Handle upload of a new file to path"""
self.log.info("Uploading file to %s", path)
model = await ensure_async(self.contents_manager.new(model, path))
self.set_status(201)
validate_model(model, expect_content=False)
self._finish_model(model)
async def _new_untitled(self, path, type="", ext=""):
"""Create a new, empty untitled entity"""
self.log.info("Creating new %s in %s", type or "file", path)
model = await ensure_async(
self.contents_manager.new_untitled(path=path, type=type, ext=ext)
)
self.set_status(201)
validate_model(model, expect_content=False)
self._finish_model(model)
async def _save(self, model, path):
"""Save an existing file."""
chunk = model.get("chunk", None)
if not chunk or chunk == -1: # Avoid tedious log information
self.log.info("Saving file at %s", path)
model = await ensure_async(self.contents_manager.save(model, path))
validate_model(model, expect_content=False)
self._finish_model(model)
@web.authenticated
@authorized
async def post(self, path=""):
"""Create a new file in the specified path.
POST creates new files. The server always decides on the name.
POST /api/contents/path
New untitled, empty file or directory.
POST /api/contents/path
with body {"copy_from" : "/path/to/OtherNotebook.ipynb"}
New copy of OtherNotebook in path
"""
cm = self.contents_manager
file_exists = await ensure_async(cm.file_exists(path))
if file_exists:
raise web.HTTPError(400, "Cannot POST to files, use PUT instead.")
model = self.get_json_body()
if model is not None:
copy_from = model.get("copy_from")
ext = model.get("ext", "")
type = model.get("type", "")
if copy_from:
await self._copy(copy_from, path)
else:
await self._new_untitled(path, type=type, ext=ext)
else:
await self._new_untitled(path)
@web.authenticated
@authorized
async def put(self, path=""):
"""Saves the file in the location specified by name and path.
PUT is very similar to POST, but the requester specifies the name,
whereas with POST, the server picks the name.
PUT /api/contents/path/Name.ipynb
Save notebook at ``path/Name.ipynb``. Notebook structure is specified
in `content` key of JSON request body. If content is not specified,
create a new empty notebook.
"""
model = self.get_json_body()
if model:
if model.get("copy_from"):
raise web.HTTPError(400, "Cannot copy with PUT, only POST")
exists = await ensure_async(self.contents_manager.file_exists(path))
if exists:
await self._save(model, path)
else:
await self._upload(model, path)
else:
await self._new_untitled(path)
@web.authenticated
@authorized
async def delete(self, path=""):
"""delete a file in the given path"""
cm = self.contents_manager
self.log.warning("delete %s", path)
await ensure_async(cm.delete(path))
self.set_status(204)
self.finish()
class CheckpointsHandler(ContentsAPIHandler):
@web.authenticated
@authorized
async def get(self, path=""):
"""get lists checkpoints for a file"""
cm = self.contents_manager
checkpoints = await ensure_async(cm.list_checkpoints(path))
data = json.dumps(checkpoints, default=json_default)
self.finish(data)
@web.authenticated
@authorized
async def post(self, path=""):
"""post creates a new checkpoint"""
cm = self.contents_manager
checkpoint = await ensure_async(cm.create_checkpoint(path))
data = json.dumps(checkpoint, default=json_default)
location = url_path_join(
self.base_url,
"api/contents",
url_escape(path),
"checkpoints",
url_escape(checkpoint["id"]),
)
self.set_header("Location", location)
self.set_status(201)
self.finish(data)
class ModifyCheckpointsHandler(ContentsAPIHandler):
@web.authenticated
@authorized
async def post(self, path, checkpoint_id):
"""post restores a file from a checkpoint"""
cm = self.contents_manager
await ensure_async(cm.restore_checkpoint(checkpoint_id, path))
self.set_status(204)
self.finish()
@web.authenticated
@authorized
async def delete(self, path, checkpoint_id):
"""delete clears a checkpoint for a given file"""
cm = self.contents_manager
await ensure_async(cm.delete_checkpoint(checkpoint_id, path))
self.set_status(204)
self.finish()
class NotebooksRedirectHandler(JupyterHandler):
"""Redirect /api/notebooks to /api/contents"""
SUPPORTED_METHODS = ("GET", "PUT", "PATCH", "POST", "DELETE")
def get(self, path):
self.log.warning("/api/notebooks is deprecated, use /api/contents")
self.redirect(url_path_join(self.base_url, "api/contents", url_escape(path)))
put = patch = post = delete = get
class TrustNotebooksHandler(JupyterHandler):
"""Handles trust/signing of notebooks"""
@web.authenticated
@authorized(resource=AUTH_RESOURCE)
async def post(self, path=""):
cm = self.contents_manager
await ensure_async(cm.trust_notebook(path))
self.set_status(201)
self.finish()
# -----------------------------------------------------------------------------
# URL to handler mappings
# -----------------------------------------------------------------------------
_checkpoint_id_regex = r"(?P<checkpoint_id>[\w-]+)"
default_handlers = [
(r"/api/contents%s/checkpoints" % path_regex, CheckpointsHandler),
(
rf"/api/contents{path_regex}/checkpoints/{_checkpoint_id_regex}",
ModifyCheckpointsHandler,
),
(r"/api/contents%s/trust" % path_regex, TrustNotebooksHandler),
(r"/api/contents%s" % path_regex, ContentsHandler),
(r"/api/notebooks/?(.*)", NotebooksRedirectHandler),
]
| 33.648649
| 96
| 0.612137
|
ed2d8a9d221cac398c556ad68316e2140c3708c6
| 193
|
py
|
Python
|
models.py
|
benvand/simple-api
|
80ac7af4aa79b32b02b8ad0bf15f73255a690fb9
|
[
"MIT"
] | null | null | null |
models.py
|
benvand/simple-api
|
80ac7af4aa79b32b02b8ad0bf15f73255a690fb9
|
[
"MIT"
] | null | null | null |
models.py
|
benvand/simple-api
|
80ac7af4aa79b32b02b8ad0bf15f73255a690fb9
|
[
"MIT"
] | null | null | null |
from app import db
class Recipe(db.Model):
name = db.Column(db.String(80), unique=True, nullable=False, primary_key=True)
def __repr__(self):
return '<Recipe %r>' % self.name
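# Editor's hedged usage sketch (not part of the original module): creating and
# persisting a Recipe row with Flask-SQLAlchemy. It assumes an application
# context is active; the recipe name is invented.
def _example_create_recipe():
    recipe = Recipe(name="pancakes")
    db.session.add(recipe)
    db.session.commit()
    return recipe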
| 24.125
| 82
| 0.668394
|
5c48d25a17c5c0203373f542b8c38c9684ae86fe
| 746
|
py
|
Python
|
tests/red_black_tests/test_in_place_subtract.py
|
lycantropos/dendroid
|
4315673ef52129909617225df6357416c56a84b3
|
[
"MIT"
] | null | null | null |
tests/red_black_tests/test_in_place_subtract.py
|
lycantropos/dendroid
|
4315673ef52129909617225df6357416c56a84b3
|
[
"MIT"
] | 16
|
2019-11-02T10:44:20.000Z
|
2020-09-21T15:22:29.000Z
|
tests/red_black_tests/test_in_place_subtract.py
|
lycantropos/dendroid
|
4315673ef52129909617225df6357416c56a84b3
|
[
"MIT"
] | 1
|
2020-03-13T08:41:39.000Z
|
2020-03-13T08:41:39.000Z
|
from typing import Tuple
from hypothesis import given
from tests.utils import (BaseSet,
are_nodes_parents_to_children,
do_paths_to_leaves_have_same_black_nodes_count,
do_red_nodes_have_black_children,
is_root_black)
from . import strategies
@given(strategies.sets_pairs)
def test_properties(sets_pair: Tuple[BaseSet, BaseSet]) -> None:
left_set, right_set = sets_pair
left_set -= right_set
left_tree = left_set.tree
assert are_nodes_parents_to_children(left_tree)
assert is_root_black(left_tree)
assert do_red_nodes_have_black_children(left_tree)
assert do_paths_to_leaves_have_same_black_nodes_count(left_tree)
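# Editor's hedged note (not part of the original test module): strategies.sets_pairs
# generates random pairs of sets, and hypothesis replays test_properties against
# many such pairs, so the four red-black invariants asserted above are checked
# after every in-place subtraction.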
| 31.083333
| 72
| 0.713137
|
69959e0749864291cb2d2d9974df0782a5746731
| 38,689
|
py
|
Python
|
workspace_tools/targets.py
|
mfiore02/mbed
|
fd285784c911f5af9ca51a29aa9908857db59b9d
|
[
"Apache-2.0"
] | 1
|
2015-01-02T06:58:35.000Z
|
2015-01-02T06:58:35.000Z
|
workspace_tools/targets.py
|
GustavWi/mbed
|
ea01d61fa18430564b78226045b196bb6bf6b66a
|
[
"Apache-2.0"
] | null | null | null |
workspace_tools/targets.py
|
GustavWi/mbed
|
ea01d61fa18430564b78226045b196bb6bf6b66a
|
[
"Apache-2.0"
] | null | null | null |
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
CORE_LABELS = {
"ARM7TDMI-S": ["ARM7"],
"Cortex-M0" : ["M0", "CORTEX_M"],
"Cortex-M0+": ["M0P", "CORTEX_M"],
"Cortex-M1" : ["M1", "CORTEX_M"],
"Cortex-M3" : ["M3", "CORTEX_M"],
"Cortex-M4" : ["M4", "CORTEX_M"],
"Cortex-M4F" : ["M4", "CORTEX_M"],
"Cortex-M7" : ["M7", "CORTEX_M"],
"Cortex-M7F" : ["M7", "CORTEX_M"],
"Cortex-A9" : ["A9", "CORTEX_A"]
}
import os
import shutil
from workspace_tools.patch import patch
class Target:
def __init__(self):
# ARM Core
self.core = None
# Is the disk provided by the interface chip of this board virtual?
self.is_disk_virtual = False
# list of toolchains that are supported by the mbed SDK for this target
self.supported_toolchains = None
# list of extra specific labels
self.extra_labels = []
# list of macros (-D)
self.macros = []
# Default online compiler:
self.default_toolchain = "ARM"
self.name = self.__class__.__name__
        # Code used to determine the device's platform
        # This code is the prefix of the URL link provided in mbed.htm (on the mbed disk)
self.detect_code = []
def program_cycle_s(self):
return 4 if self.is_disk_virtual else 1.5
def get_labels(self):
return [self.name] + CORE_LABELS[self.core] + self.extra_labels
def init_hooks(self, hook, toolchain_name):
pass
### NXP ###
# This class implements the post-link patching step needed by LPC targets
class LPCTarget(Target):
def __init__(self):
Target.__init__(self)
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("post", self.lpc_patch)
@staticmethod
def lpc_patch(t_self, resources, elf, binf):
t_self.debug("LPC Patch: %s" % os.path.split(binf)[1])
patch(binf)
class LPC11C24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11CXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC1114(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U24_401']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["1040"]
class OC_MBUINO(LPC11U24):
def __init__(self):
LPC11U24.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.macros = ['TARGET_LPC11U24']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24_301(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC11U34_421(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class APPNEARME_MICRONFCBOARD(LPC11U34_421):
def __init__(self):
LPC11U34_421.__init__(self)
self.macros = ['LPC11U34_421']
self.is_disk_virtual = True
class LPC11U35_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_Y5_MBUG(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U37_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPCCAPPUCCINO(LPC11U37_501):
def __init__(self):
LPC11U37_501.__init__(self)
class ARCH_GPRS(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U37_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class LPC11U68(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC11U6X']
self.supported_toolchains = ["ARM", "uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1168"]
class LPC1347(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC13XX']
self.supported_toolchains = ["ARM", "GCC_ARM","IAR"]
class LPC1549(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC15XX']
self.supported_toolchains = ["uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1549"]
class LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'MBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.detect_code = ["1010"]
class ARCH_PRO(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class UBLOX_C027(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class LPC2368(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC23XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "GCC_CR"]
class LPC810(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC812(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["1050"]
class LPC824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM","GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class SSCI824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC4088(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC408X']
self.supported_toolchains = ["ARM", "GCC_CR", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
if not os.path.isdir(binf):
# Regular binary file, nothing to do
LPCTarget.lpc_patch(t_self, resources, elf, binf)
return
outbin = open(binf + ".temp", "wb")
partf = open(os.path.join(binf, "ER_IROM1"), "rb")
        # Pad the first part (internal flash) with 0xFF to 512k
data = partf.read()
outbin.write(data)
outbin.write('\xFF' * (512*1024 - len(data)))
partf.close()
# Read and append the second part (external flash) in chunks of fixed size
chunksize = 128 * 1024
partf = open(os.path.join(binf, "ER_IROM2"), "rb")
while True:
data = partf.read(chunksize)
outbin.write(data)
if len(data) < chunksize:
break
partf.close()
outbin.close()
# Remove the directory with the binary parts and rename the temporary
# file to 'binf'
shutil.rmtree(binf, True)
os.rename(binf + '.temp', binf)
t_self.debug("Generated custom binary file (internal flash + SPIFI)")
LPCTarget.lpc_patch(t_self, resources, elf, binf)
class LPC4088_DM(LPC4088):
pass
class LPC4330_M4(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR", "GCC_ARM"]
class LPC4330_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC4337(LPCTarget):
def __init__(self):
        LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4337']
self.supported_toolchains = ["ARM"]
class LPC1800(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC43XX']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC11U37H_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
### Freescale ###
class KL05Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL25Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "GCC_CW_EWL", "GCC_CW_NEWLIB", "GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0200"]
class KL43Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL46Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0220"]
class K20D50M(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
class TEENSY3_1(Target):
OUTPUT_EXT = 'hex'
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX', 'K20DX256']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset = 0)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class K22F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE']
self.macros = ["CPU_MK22FN512VLH12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0201"]
class K64F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F', 'FRDM']
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
self.detect_code = ["0240"]
class MTS_GAMBIT(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED", "TARGET_K64F"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
### STMicro ###
class NUCLEO_F030R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F030R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0725"]
class NUCLEO_F070RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F070RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0755"]
class NUCLEO_F072RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F072RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0730"]
class NUCLEO_F091RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F091RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0750"]
class NUCLEO_F103RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F103RB']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0700"]
class NUCLEO_F302R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F302R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0705"]
class NUCLEO_F303RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0745"]
class NUCLEO_F334R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0735"]
class NUCLEO_F401RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0720"]
class NUCLEO_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0740"]
class NUCLEO_L053R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053R8']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0715"]
class NUCLEO_L073RZ(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L073RZ']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0760"]
class NUCLEO_L152RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0710"]
class STM32F3XX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['STM', 'STM32F3XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class STM32F407(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F4XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
class ARCH_MAX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
def program_cycle_s(self):
return 2
class DISCO_F051R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F051', 'STM32F051R8']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F100RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F100RB']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F303VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303', 'STM32F303VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F334C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334C8']
self.supported_toolchains = ["GCC_ARM",]
self.default_toolchain = "GCC_ARM"
self.detect_code = ["0735"]
class DISCO_F407VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
class DISCO_F429ZI(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F429', 'STM32F429ZI']
self.supported_toolchains = ["GCC_ARM", "IAR"]
self.default_toolchain = "GCC_ARM"
class DISCO_L053C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053C8']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class MTS_MDOT_F405RG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F405RG']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=48000000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
class MTS_MDOT_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=96000000', 'USE_PLL_HSE_EXTC=0']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
class MTS_DRAGONFLY_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
class DISCO_F401VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401', 'STM32F401VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class UBLOX_C029(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F439', 'STM32F439ZI']
self.macros = ['HSE_VALUE=24000000', 'HSE_STARTUP_TIMEOUT=5000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
### Nordic ###
class NRF51822(Target):
# the following is a list of possible Nordic softdevices in decreasing order
# of preference.
EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_7.0.0_softdevice.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_6.0.0_softdevice.hex',
'offset' : 0x14000
}
]
EXPECTED_BOOTLOADER_FILENAME = "nrf51822_bootloader.hex"
OUTPUT_EXT = 'hex'
MERGE_SOFT_DEVICE = True
MERGE_BOOTLOADER = False
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "NRF51822_MKIT", "MCU_NRF51822", "MCU_NORDIC_16K"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["1070"]
def program_cycle_s(self):
return 6
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM', 'IAR']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
# Scan to find the actual paths of soft device and bootloader files
sdf = None
blf = None
for hexf in resources.hex_files:
if hexf.find(t_self.target.EXPECTED_BOOTLOADER_FILENAME) != -1:
blf = hexf
else:
for softdeviceAndOffsetEntry in t_self.target.EXPECTED_SOFTDEVICES_WITH_OFFSETS:
if hexf.find(softdeviceAndOffsetEntry['name']) != -1:
sdf = hexf
break
if sdf is None:
t_self.debug("Hex file not found. Aborting.")
return
# Merge user code with softdevice
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset=softdeviceAndOffsetEntry['offset'])
if t_self.target.MERGE_SOFT_DEVICE is True:
t_self.debug("Merge SoftDevice file %s" % softdeviceAndOffsetEntry['name'])
sdh = IntelHex(sdf)
binh.merge(sdh)
if t_self.target.MERGE_BOOTLOADER is True and blf is not None:
t_self.debug("Merge BootLoader file %s" % t_self.target.EXPECTED_BOOTLOADER_FILENAME)
blh = IntelHex(blf)
binh.merge(blh)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class NRF51822_BOOT(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "NRF51822_MKIT", "MCU_NRF51822", "MCU_NORDIC_16K", "NRF51822"]
self.macros = ['TARGET_NRF51822', 'TARGET_OTA_ENABLED']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class NRF51822_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "NRF51822_MKIT", "MCU_NRF51822", "MCU_NORDIC_16K", "NRF51822"]
self.macros = ['TARGET_NRF51822', 'TARGET_OTA_ENABLED']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = False
class NRF51_DK(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K']
self.macros = ['TARGET_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_BOOT(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K', 'NRF51_DK']
self.macros = ['TARGET_NRF51822', 'TARGET_NRF51_DK', 'TARGET_OTA_ENABLED']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class NRF51_DK_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K', 'NRF51_DK']
self.macros = ['TARGET_NRF51822', 'TARGET_NRF51_DK', 'TARGET_OTA_ENABLED']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = False
class NRF51_DONGLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K']
self.macros = ['TARGET_NRF51822']
class ARCH_BLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class SEEED_TINY_BLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
class SEEED_TINY_BLE_BOOT(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K', 'SEEED_TINY_BLE']
self.macros = ['TARGET_NRF51822', 'TARGET_SEEED_TINY_BLE', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class SEEED_TINY_BLE_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K', 'SEEED_TINY_BLE']
self.macros = ['TARGET_NRF51822', 'TARGET_SEEED_TINY_BLE', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
class HRM1017(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
class RBLAB_NRF51822(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_BLENANO(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
class NRF51822_Y5_MBUG(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
class XADOW_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class WALLBOT_BLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
class DELTA_DFCM_NNN40(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
class DELTA_DFCM_NNN40_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K', 'DELTA_DFCM_NNN40']
self.MERGE_SOFT_DEVICE = False
### ARM ###
class ARM_MPS2_M0(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['ARM_SSG', 'MPS2_M0']
self.macros = ['CMSDK_CM0']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M0P(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['ARM_SSG', 'MPS2_M0P']
self.macros = ['CMSDK_CM0plus']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M1(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M1"
self.extra_labels = ['ARM_SSG', 'MPS2_M1']
self.macros = ['CMSDK_CM1']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M3(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['ARM_SSG', 'MPS2_M3']
self.macros = ['CMSDK_CM3']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M4(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2_M4']
self.macros = ['CMSDK_CM4']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M7(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M7F"
self.extra_labels = ['ARM_SSG', 'MPS2_M7']
self.macros = ['CMSDK_CM7']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2(ARM_MPS2_M4):
pass
### Renesas ###
class RZ_A1H(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM"
def program_cycle_s(self):
return 2
# Get a single instance for each target
TARGETS = [
### NXP ###
LPC11C24(),
LPC11U24(),
OC_MBUINO(), # LPC11U24
LPC11U24_301(),
LPC11U34_421(),
APPNEARME_MICRONFCBOARD(), #LPC11U34_421
LPC11U35_401(),
LPC11U35_501(),
XADOW_M0(), # LPC11U35_501
LPC11U35_Y5_MBUG(),
LPC11U37_501(),
LPCCAPPUCCINO(),# LPC11U37_501
ARCH_GPRS(), # LPC11U37_501
LPC11U68(),
LPC1114(),
LPC1347(),
LPC1549(),
LPC1768(),
ARCH_PRO(), # LPC1768
UBLOX_C027(), # LPC1768
LPC2368(),
LPC810(),
LPC812(),
LPC824(),
SSCI824(), # LPC824
LPC4088(),
LPC4088_DM(),
LPC4330_M4(),
LPC4330_M0(),
LPC4337(),
LPC11U37H_401(),
### Freescale ###
KL05Z(),
KL25Z(),
KL43Z(),
KL46Z(),
K20D50M(),
TEENSY3_1(),
K22F(),
K64F(),
MTS_GAMBIT(), # FRDM K64F
### STMicro ###
NUCLEO_F030R8(),
NUCLEO_F070RB(),
NUCLEO_F072RB(),
NUCLEO_F091RC(),
NUCLEO_F103RB(),
NUCLEO_F302R8(),
NUCLEO_F303RE(),
NUCLEO_F334R8(),
NUCLEO_F401RE(),
NUCLEO_F411RE(),
NUCLEO_L053R8(),
NUCLEO_L073RZ(),
NUCLEO_L152RE(),
STM32F3XX(),
STM32F407(),
DISCO_F051R8(),
DISCO_F100RB(),
DISCO_F303VC(),
DISCO_F334C8(),
DISCO_F407VG(), # STM32F407
ARCH_MAX(), # STM32F407
DISCO_F429ZI(),
DISCO_L053C8(),
MTS_MDOT_F405RG(),
MTS_MDOT_F411RE(),
MTS_DRAGONFLY_F411RE(),
DISCO_F401VC(),
UBLOX_C029(), # STM32F439
### Nordic ###
NRF51822(),
NRF51822_BOOT(), # nRF51822
NRF51822_OTA(), # nRF51822
NRF51_DK(),
NRF51_DK_BOOT(), # nRF51822
NRF51_DK_OTA(), # nRF51822
NRF51_DONGLE(),
ARCH_BLE(), # nRF51822
SEEED_TINY_BLE(), # nRF51822
    SEEED_TINY_BLE_BOOT(), # nRF51822
    SEEED_TINY_BLE_OTA(), # nRF51822
    HRM1017(), # nRF51822
    RBLAB_NRF51822(), # nRF51822
    RBLAB_BLENANO(), # nRF51822
    NRF51822_Y5_MBUG(), # nRF51822
WALLBOT_BLE(), # nRF51822
DELTA_DFCM_NNN40(), # nRF51822
DELTA_DFCM_NNN40_OTA(), # nRF51822
### ARM ###
ARM_MPS2_M0(),
ARM_MPS2_M0P(),
ARM_MPS2_M1(),
ARM_MPS2_M3(),
ARM_MPS2_M4(),
ARM_MPS2_M7(),
ARM_MPS2(),
### Renesas ###
RZ_A1H(),
]
# Map each target name to its unique instance
TARGET_MAP = {}
for t in TARGETS:
TARGET_MAP[t.name] = t
TARGET_NAMES = TARGET_MAP.keys()
# Some targets with different name have the same exporters
EXPORT_MAP = { }
# Detection APIs
def get_target_detect_codes():
""" Returns dictionary mapping detect_code -> platform_name
"""
result = {}
for target in TARGETS:
for detect_code in target.detect_code:
result[detect_code] = target.name
return result
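# Editor's hedged usage sketch (not part of the original module): looking up a
# target by name and inspecting its metadata; "K64F" and detect code "0240" are
# taken from the definitions above.
def _example_target_lookup():
    k64f = TARGET_MAP["K64F"]
    return {
        "labels": k64f.get_labels(),
        "toolchains": k64f.supported_toolchains,
        "platform_for_0240": get_target_detect_codes().get("0240"),
    }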
| 33.439067
| 101
| 0.618057
|
6571be7828291e4c5c39d2b227e0895a8f78cfe0
| 625
|
py
|
Python
|
common/migrations/0015_auto_20161013_1746.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
common/migrations/0015_auto_20161013_1746.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
common/migrations/0015_auto_20161013_1746.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-13 17:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0014_remove_person_title'),
]
operations = [
migrations.AlterField(
model_name='person',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
]
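# Editor's hedged note (not part of the generated migration): Django applies a
# migration like this through its management command, e.g.
#   python manage.py migrate common 0015
# run from the project root (shown for illustration only).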
| 27.173913
| 160
| 0.6768
|
bdbff95c6e9d31b1dcacab8b81589e38dd07300a
| 5,450
|
py
|
Python
|
readthedocs/gold/migrations/0001_initial.py
|
ardalis/readthedocs.org
|
1c417d866f014e01d3842022facf7fed4c09921a
|
[
"MIT"
] | null | null | null |
readthedocs/gold/migrations/0001_initial.py
|
ardalis/readthedocs.org
|
1c417d866f014e01d3842022facf7fed4c09921a
|
[
"MIT"
] | null | null | null |
readthedocs/gold/migrations/0001_initial.py
|
ardalis/readthedocs.org
|
1c417d866f014e01d3842022facf7fed4c09921a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GoldUser'
db.create_table(u'gold_golduser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='gold', unique=True, to=orm['auth.User'])),
('level', self.gf('django.db.models.fields.CharField')(default='supporter', max_length=20)),
('last_4_digits', self.gf('django.db.models.fields.CharField')(max_length=4)),
('stripe_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
('subscribed', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'gold', ['GoldUser'])
def backwards(self, orm):
# Deleting model 'GoldUser'
db.delete_table(u'gold_golduser')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'gold.golduser': {
'Meta': {'object_name': 'GoldUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_4_digits': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'level': ('django.db.models.fields.CharField', [], {'default': "'supporter'", 'max_length': '20'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subscribed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gold'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['gold']
| 68.125
| 195
| 0.583486
|
ce8cffadb2332689b6185d6db2912424e3bd2838
| 3,545
|
py
|
Python
|
scalabel/label/from_mot.py
|
scalabel/scalabel
|
857a32e1c08e5b9a7ab346468940621c2fe2226a
|
[
"Apache-2.0"
] | 279
|
2019-11-18T01:48:39.000Z
|
2022-03-30T00:16:43.000Z
|
scalabel/label/from_mot.py
|
scalabel/scalabel
|
857a32e1c08e5b9a7ab346468940621c2fe2226a
|
[
"Apache-2.0"
] | 141
|
2019-11-20T02:36:11.000Z
|
2022-03-29T15:17:46.000Z
|
scalabel/label/from_mot.py
|
scalabel/scalabel
|
857a32e1c08e5b9a7ab346468940621c2fe2226a
|
[
"Apache-2.0"
] | 85
|
2019-11-18T06:10:12.000Z
|
2022-03-27T12:32:55.000Z
|
"""Convert MOT Challenge format dataset to Scalabel."""
import argparse
import os
from collections import defaultdict
from typing import Dict, List, Union
from PIL import Image
from ..common.io import load_file_as_list
from .io import save
from .transforms import bbox_to_box2d
from .typing import Frame, ImageSize, Label
# Classes in MOT:
# 1: 'pedestrian'
# 2: 'person on vehicle'
# 3: 'car'
# 4: 'bicycle'
# 5: 'motorbike'
# 6: 'non motorized vehicle'
# 7: 'static person'
# 8: 'distractor'
# 9: 'occluder'
# 10: 'occluder on the ground',
# 11: 'occluder full'
# 12: 'reflection'
IGNORE = [
"person on vehicle",
"static person",
"distractor",
"reflection",
"ignore",
]
NAME_MAPPING = {
"1": "pedestrian",
"2": "person on vehicle",
"7": "static person",
"8": "distractor",
"12": "reflection",
"13": "ignore",
}
def parse_arguments() -> argparse.Namespace:
"""Parse the arguments."""
parser = argparse.ArgumentParser(description="motchallenge to scalabel")
parser.add_argument(
"--input",
"-i",
help="path to MOTChallenge data (images + annotations).",
)
parser.add_argument(
"--output",
"-o",
        default=".",
help="Output path for Scalabel format annotations.",
)
return parser.parse_args()
def parse_annotations(ann_filepath: str) -> Dict[int, List[Label]]:
"""Parse annotation file into List of Scalabel Label type per frame."""
outputs = defaultdict(list)
for line in load_file_as_list(ann_filepath):
gt = line.strip().split(",")
class_id = gt[7]
if class_id not in NAME_MAPPING:
continue
class_name = NAME_MAPPING[class_id]
frame_id, ins_id = map(int, gt[:2])
bbox = list(map(float, gt[2:6]))
box2d = bbox_to_box2d(bbox)
ignored = False
if class_name in IGNORE:
ignored = True
class_name = "pedestrian"
attrs = dict(
visibility=float(gt[8]), ignored=ignored
) # type: Dict[str, Union[bool, float, str]]
ann = Label(
category=class_name,
id=ins_id,
box2d=box2d,
attributes=attrs,
)
outputs[frame_id].append(ann)
return outputs
def from_mot(data_path: str) -> List[Frame]:
"""Function converting MOT annotations to Scalabel format."""
frames = []
for video in sorted(os.listdir(data_path)):
img_names = sorted(os.listdir(os.path.join(data_path, video, "img1")))
annotations = parse_annotations(
os.path.join(data_path, video, "gt/gt.txt"),
)
for i, img_name in enumerate(img_names):
assert i + 1 == int(img_name.replace(".jpg", ""))
relative_path = os.path.join(video, "img1", img_name)
img = Image.open(os.path.join(data_path, relative_path))
frame = Frame(
name=img_name,
video_name=video,
url=relative_path,
frame_index=i,
size=ImageSize(width=img.width, height=img.height),
                labels=annotations[i + 1] if i + 1 in annotations else None,
)
frames.append(frame)
return frames
def run(args: argparse.Namespace) -> None:
"""Run conversion with command line arguments."""
result = from_mot(args.input)
save(os.path.join(args.output, "scalabel_anns.json"), result)
if __name__ == "__main__":
run(parse_arguments())
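# Editor's hedged usage note (not part of the original module): the converter is
# meant to be invoked as a script; the paths below are illustrative only.
#   python -m scalabel.label.from_mot -i /data/MOT17/train -o ./annotations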
| 28.58871
| 78
| 0.597743
|
7f6043e2b09f2c643026bb2873bf38b4fc42c85f
| 3,312
|
py
|
Python
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/enable_key_request.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/enable_key_request.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/enable_key_request.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class EnableKeyRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'version_id': 'str',
'body': 'OperateKeyRequestBody'
}
attribute_map = {
'version_id': 'version_id',
'body': 'body'
}
def __init__(self, version_id='v1.0', body=None):
"""EnableKeyRequest - a model defined in huaweicloud sdk"""
self._version_id = None
self._body = None
self.discriminator = None
self.version_id = version_id
if body is not None:
self.body = body
@property
def version_id(self):
"""Gets the version_id of this EnableKeyRequest.
:return: The version_id of this EnableKeyRequest.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""Sets the version_id of this EnableKeyRequest.
:param version_id: The version_id of this EnableKeyRequest.
:type: str
"""
self._version_id = version_id
@property
def body(self):
"""Gets the body of this EnableKeyRequest.
:return: The body of this EnableKeyRequest.
:rtype: OperateKeyRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this EnableKeyRequest.
:param body: The body of this EnableKeyRequest.
:type: OperateKeyRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnableKeyRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
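# Editor's hedged usage sketch (not part of the generated model): building a
# request with the default API version and serialising it. No body is attached
# here because OperateKeyRequestBody is defined in a sibling module of the SDK.
def _example_enable_key_request():
    request = EnableKeyRequest(version_id="v1.0")
    return request.to_dict()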
| 24.902256
| 74
| 0.542874
|
3b774e357c0afb8f8d675c20a1910348cdd3866e
| 1,487
|
py
|
Python
|
conanfile.py
|
Chrismarsh/conan-sparsehash
|
48d5456022b0313d82496093f26dd32d1daa01ff
|
[
"MIT"
] | null | null | null |
conanfile.py
|
Chrismarsh/conan-sparsehash
|
48d5456022b0313d82496093f26dd32d1daa01ff
|
[
"MIT"
] | null | null | null |
conanfile.py
|
Chrismarsh/conan-sparsehash
|
48d5456022b0313d82496093f26dd32d1daa01ff
|
[
"MIT"
] | null | null | null |
from conans import AutoToolsBuildEnvironment, ConanFile, tools
import os
class SparsehashConan(ConanFile):
name = "sparsehash"
description = "The C++ associative containers"
homepage = "https://github.com/sparsehash/sparsehash"
license = "BSD-3-Clause"
topics = ("conan", "libsparsehash",
"dense_hash_map", "sparse_hash_map",
"dense_hash_set", "sparse_hash_set")
settings = "os", "arch", "compiler", "build_type"
url = "https://github.com/Chrismarsh/conan-sparsehash"
exports = ["LICENSE"]
_autotools = None
_source_subfolder = 'sparsehash'
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("sparsehash-sparsehash-{}".format(self.version), self._source_subfolder)
def _configure_autotools(self):
if not self._autotools:
self._autotools = AutoToolsBuildEnvironment(self)
self._autotools.configure(configure_dir=self._source_subfolder)
return self._autotools
def build(self):
autotools = self._configure_autotools()
autotools.make()
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "lib"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_id(self):
self.info.header_only()
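# Editor's hedged note (not part of the original recipe): with Conan 1.x a
# recipe like this is usually exercised from the recipe directory with
#   conan create . <user>/<channel>
# where <user> and <channel> are placeholders.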
| 34.581395
| 90
| 0.669132
|
f11158b4d0c2c4c1a49311a63eb869fde6f99c82
| 3,004
|
py
|
Python
|
flask_apscheduler/auth.py
|
0x9fff00/flask-apscheduler
|
cc52c39e1948c4e8de5da0d01db45f1779f61997
|
[
"Apache-2.0"
] | 2
|
2019-01-24T15:09:15.000Z
|
2019-01-24T15:09:17.000Z
|
flask_apscheduler/auth.py
|
0x9fff00/flask-apscheduler
|
cc52c39e1948c4e8de5da0d01db45f1779f61997
|
[
"Apache-2.0"
] | null | null | null |
flask_apscheduler/auth.py
|
0x9fff00/flask-apscheduler
|
cc52c39e1948c4e8de5da0d01db45f1779f61997
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Vinicius Chiele. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides classes for authentication."""
import base64
from flask import request
from werkzeug.http import bytes_to_wsgi, wsgi_to_bytes
def get_authorization_header():
"""
Return request's 'Authorization:' header as
a two-tuple of (type, info).
"""
header = request.environ.get('HTTP_AUTHORIZATION')
if not header:
return None
header = wsgi_to_bytes(header)
try:
auth_type, auth_info = header.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return None
return auth_type, auth_info
class Authorization(dict):
"""
A class to hold the authorization data.
    :param str auth_type: The authorization type, e.g. basic, bearer.
"""
def __init__(self, auth_type, **kwargs):
super(Authorization, self).__init__(**kwargs)
self.auth_type = auth_type
class HTTPAuth(object):
"""
A base class from which all authentication classes should inherit.
"""
def get_authorization(self):
"""
Get the authorization header.
        :return Authorization: The authorization data or None if it is not present or invalid.
"""
        raise NotImplementedError()
def get_authenticate_header(self):
"""
Return the value of `WWW-Authenticate` header in a
`401 Unauthenticated` response.
"""
pass
class HTTPBasicAuth(HTTPAuth):
"""
HTTP Basic authentication.
"""
www_authenticate_realm = 'Authentication Required'
def get_authorization(self):
"""
Get the username and password for Basic authentication header.
        :return Authorization: The authorization data or None if it is not present or invalid.
"""
auth = get_authorization_header()
if not auth:
return None
auth_type, auth_info = auth
if auth_type != b'basic':
return None
try:
username, password = base64.b64decode(auth_info).split(b':', 1)
except Exception:
return None
return Authorization('basic', username=bytes_to_wsgi(username), password=bytes_to_wsgi(password))
def get_authenticate_header(self):
"""
Return the value of `WWW-Authenticate` header in a
`401 Unauthenticated` response.
"""
return 'Basic realm="%s"' % self.www_authenticate_realm
| 27.309091
| 105
| 0.663449
|
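A hedged usage sketch for HTTPBasicAuth above, run inside a Flask test request context. The import path follows the repo layout shown in the row (flask_apscheduler.auth), the credentials are made up, and it assumes a Werkzeug version that still provides wsgi_to_bytes/bytes_to_wsgi, since the module imports them.
import base64

from flask import Flask

from flask_apscheduler.auth import HTTPBasicAuth

app = Flask(__name__)
token = base64.b64encode(b"alice:s3cret").decode("ascii")

# test_request_context() fakes an incoming request carrying the header.
with app.test_request_context(headers={"Authorization": "Basic " + token}):
    auth = HTTPBasicAuth().get_authorization()
    print(auth.auth_type, auth["username"], auth["password"])
    # -> basic alice s3cret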
803510bdfc3a5c43983c8abbda7645de5edd65a4
| 903
|
py
|
Python
|
src/pattern_matchers/regex.py
|
jonwesneski/end2
|
708c7b96c1086959565e2889a0818451e6e2c931
|
[
"MIT"
] | null | null | null |
src/pattern_matchers/regex.py
|
jonwesneski/end2
|
708c7b96c1086959565e2889a0818451e6e2c931
|
[
"MIT"
] | 1
|
2022-03-12T19:43:00.000Z
|
2022-03-12T19:43:00.000Z
|
src/pattern_matchers/regex.py
|
jonwesneski/end2
|
708c7b96c1086959565e2889a0818451e6e2c931
|
[
"MIT"
] | null | null | null |
from glob import glob
import os
import re
from src.pattern_matchers.base import PatternMatcherBase
class RegexModulePatternMatcher(PatternMatcherBase):
regex_path_separator = f'\{os.sep}'
@classmethod
def parse_str(cls, pattern: str, include: bool = True):
items = []
include = False
for module in filter(lambda x: not x.endswith('__init__.py'),
glob(f'.{os.sep}**{os.sep}*.py', recursive=True)):
if re.match(pattern, module):
items.append(module)
include = True
return cls(items, pattern, include)
class RegexTestCasePatternMatcher(PatternMatcherBase):
@classmethod
def parse_str(cls, pattern: str, include: bool = True):
return cls([], pattern, True)
def included(self, func) -> bool:
return True if re.match(self._pattern, func.__name__) else False
| 31.137931
| 79
| 0.638981
|
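A hedged sketch of how RegexTestCasePatternMatcher above is used. PatternMatcherBase is not shown in this file, so the assumption is that its constructor stores the pattern as self._pattern (inferred from included()), and that the project root is on sys.path so the src package resolves, as in the module's own imports.
from src.pattern_matchers.regex import RegexTestCasePatternMatcher


def test_checkout_flow():
    pass


def helper():
    pass


matcher = RegexTestCasePatternMatcher.parse_str(r"test_.*")
print(matcher.included(test_checkout_flow))  # -> True, the name matches the pattern
print(matcher.included(helper))              # -> False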
bdc2fd38447d96b974ebb4afd0adadf5e4e27c16
| 397
|
py
|
Python
|
sdk/communication/azure-communication-chat/azure/communication/chat/_version.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/communication/azure-communication-chat/azure/communication/chat/_version.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/communication/azure-communication-chat/azure/communication/chat/_version.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "1.2.0"
SDK_MONIKER = "communication-chat/{}".format(VERSION) # type: str
| 39.7
| 76
| 0.455919
|
a3905a447bbbc1fdc7a56106e7be712b883cfa09
| 1,047
|
py
|
Python
|
tests/test_kb_plugins.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_kb_plugins.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_kb_plugins.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
import angr
import networkx
import os
location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_kb_plugins():
p = angr.Project(os.path.join(location, 'x86_64', 'fauxware'), auto_load_libs=False)
assert isinstance(p.kb.data, angr.knowledge_plugins.Data)
assert isinstance(p.kb.functions, angr.knowledge_plugins.FunctionManager)
assert isinstance(p.kb.variables, angr.knowledge_plugins.VariableManager)
assert isinstance(p.kb.labels, angr.knowledge_plugins.Labels)
assert isinstance(p.kb.comments, angr.knowledge_plugins.Comments)
assert isinstance(p.kb.callgraph, networkx.Graph)
assert isinstance(p.kb.resolved_indirect_jumps, dict)
assert isinstance(p.kb.unresolved_indirect_jumps, set)
assert dir(p.kb) is not None
for plugin in ['data', 'functions', 'variables', 'labels', 'comments', 'callgraph', 'resolved_indirect_jumps', 'unresolved_indirect_jumps']:
assert plugin in dir(p.kb)
if __name__ == '__main__':
test_kb_plugins()
| 37.392857
| 144
| 0.74021
|
d0ec3e508f1b06c855272ea4f49ac662fdfe167b
| 26
|
py
|
Python
|
simeng/__init__.py
|
wstlabs/similarity-engine
|
fde4dd31b0f1738573513159f950823cb2d4a7ce
|
[
"Apache-2.0"
] | null | null | null |
simeng/__init__.py
|
wstlabs/similarity-engine
|
fde4dd31b0f1738573513159f950823cb2d4a7ce
|
[
"Apache-2.0"
] | null | null | null |
simeng/__init__.py
|
wstlabs/similarity-engine
|
fde4dd31b0f1738573513159f950823cb2d4a7ce
|
[
"Apache-2.0"
] | null | null | null |
from .core import ingest
| 8.666667
| 24
| 0.769231
|
26b467c0c8390d8093df967cd1ba27454c3052bd
| 1,684
|
py
|
Python
|
text2hex.py
|
jamokou/text2hex
|
2470ec82922404177258dcd427fffded3212bdc2
|
[
"MIT"
] | 1
|
2015-06-13T21:10:46.000Z
|
2015-06-13T21:10:46.000Z
|
text2hex.py
|
jamokou/text2hex
|
2470ec82922404177258dcd427fffded3212bdc2
|
[
"MIT"
] | null | null | null |
text2hex.py
|
jamokou/text2hex
|
2470ec82922404177258dcd427fffded3212bdc2
|
[
"MIT"
] | null | null | null |
# Program Name : text2hex
# Programmer : The Alpha
# Credits : Iranpython.blog.ir
# Version : 0.91(Beta Version)
# Linted By : Pyflakes
# Info : text2hex is a simple tool used to convert strings to hex.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import binascii
class TextToHex(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle("Text2Hex")
layout = QGridLayout()
self.label_cp = QLabel("<b><code><h3>pystudent copyright</h3></code></b>")
label_text = QLabel("<b><code><h3>Text :</h3></code></b>")
self.line_edit_text = QLineEdit()
label_hex = QLabel("<b><code><h3>Hex :</h3></code></b>")
self.line_edit_hex = QLineEdit()
self.line_edit_hex.setReadOnly(True)
self.convert_button = QPushButton("Convert")
self.exit_button = QPushButton("Exit")
layout.addWidget(label_text, 0, 0)
layout.addWidget(self.line_edit_text, 0, 1)
layout.addWidget(label_hex, 1, 0)
layout.addWidget(self.line_edit_hex, 1, 1)
layout.addWidget(self.convert_button, 2, 0)
layout.addWidget(self.label_cp, 2, 1)
layout.addWidget(self.exit_button, 2, 2)
self.convert_button.clicked.connect(self.convertor)
self.exit_button.clicked.connect(self.close)
self.setLayout(layout)
def convertor(self):
data = self.line_edit_text.text()
        hex_text = binascii.hexlify(bytes(data, 'utf-8')).decode('ascii')
        hex_text = "0x" + hex_text
self.line_edit_hex.setText(hex_text)
if hex_text == "0x":
self.line_edit_hex.setText("")
app = QApplication(sys.argv)
dialog = TextToHex()
dialog.show()
app.exec_()
| 27.606557
| 79
| 0.703682
|
b8074df98c29ea16952bfaab4d6bcdc54d947e95
| 1,777
|
py
|
Python
|
m/db/engine_interface.py
|
minersoft/miner
|
247ae1ffb27a4ce3203ac236afd2ed145b31a465
|
[
"BSD-3-Clause"
] | 1
|
2015-04-18T16:48:48.000Z
|
2015-04-18T16:48:48.000Z
|
m/db/engine_interface.py
|
minersoft/miner
|
247ae1ffb27a4ce3203ac236afd2ed145b31a465
|
[
"BSD-3-Clause"
] | null | null | null |
m/db/engine_interface.py
|
minersoft/miner
|
247ae1ffb27a4ce3203ac236afd2ed145b31a465
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright Michael Groys, 2014
#
# This module defines api for interaction between miner and databases
class FetchCursorInterface(object):
"""Abstract interface for fetching query result data"""
def __init__(self):
pass
def getColumnNames(self):
raise NotImplementedError
def getNumColumns(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def close(self):
pass
class ConnectionInterface(object):
"""Abstract interface for database connection"""
def close(self):
raise NotImplementedError
def commit(self):
pass
def getTableNames(self):
return []
def fetch(self, query, *params, **env):
"""
Fetches data from database,
params - define query parameters
env - set of optional parameters that control query execution
returns FetchCursorInterface for iteration over results
"""
raise NotImplementedError
def push(self, statement, seq_of_params, **namedParams):
"""
        Pushes data to the database, executing the statement once per parameter set,
        seq_of_params - sequence of query parameter sets
        namedParams - set of optional parameters that control statement execution
"""
raise NotImplementedError
def execute(self, statement, *params, **env):
"""
General statement execution
params - define query parameters
env - set of optional parameters that control query execution
"""
raise NotImplementedError
class EngineInterface(object):
"""Database engine"""
def __init__(self):
pass
def connect(self, dbtype, parsedUrl, **kwargs):
raise NotImplementedError
| 31.175439
| 73
| 0.626899
|
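A hedged sketch of the fetch()/cursor contract defined above: a toy in-memory connection that serves rows from a list. It is not part of the miner codebase; the import path is simply the module path shown in the row.
from m.db.engine_interface import ConnectionInterface, FetchCursorInterface


class ListCursor(FetchCursorInterface):
    def __init__(self, names, rows):
        self._names = names
        self._rows = rows

    def getColumnNames(self):
        return self._names

    def getNumColumns(self):
        return len(self._names)

    def __iter__(self):
        return iter(self._rows)


class ListConnection(ConnectionInterface):
    def __init__(self, names, rows):
        self._names = names
        self._rows = rows

    def close(self):
        pass

    def fetch(self, query, *params, **env):
        # Ignore the query and always serve the canned rows.
        return ListCursor(self._names, self._rows)


conn = ListConnection(["id", "name"], [(1, "a"), (2, "b")])
for row in conn.fetch("select id, name from t"):
    print(row)
conn.close()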
5649c9cdbf4d21aab821b80f7d41e2ac4517c9d7
| 250
|
py
|
Python
|
python/bootcamp/unittest/cap.py
|
RodolfoDiaz/CodeLibrary
|
603ac9eee5b014bef8f04545bc4e73b0ec376131
|
[
"MIT"
] | 1
|
2018-10-11T14:29:40.000Z
|
2018-10-11T14:29:40.000Z
|
python/bootcamp/unittest/cap.py
|
RodolfoDiaz/CodeLibrary
|
603ac9eee5b014bef8f04545bc4e73b0ec376131
|
[
"MIT"
] | 2
|
2020-04-26T21:12:00.000Z
|
2020-10-28T21:10:03.000Z
|
python/bootcamp/unittest/cap.py
|
RodolfoDiaz/CodeLibrary
|
603ac9eee5b014bef8f04545bc4e73b0ec376131
|
[
"MIT"
] | null | null | null |
"""Capitalize words."""
def cap_text(text):
"""capitalize() upper cases the first letter of a string."""
return text.capitalize()
def title_text(text):
"""title() upper cases the first letter of every word."""
return text.title()
| 20.833333
| 64
| 0.66
|
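A tiny usage sketch for the two helpers above; the bare import assumes cap.py is on the path, as in the row's layout.
from cap import cap_text, title_text

print(cap_text("hello world"))    # -> "Hello world"
print(title_text("hello world"))  # -> "Hello World"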
073f03b2a94245cc1f97f1355f9c9fb176ec0cb8
| 1,678
|
py
|
Python
|
steamboat_people_counter.py
|
Seagor/Web-Cam-Counter
|
d80d12f713d0cf089a203c427d2f930b2f4716dd
|
[
"MIT"
] | null | null | null |
steamboat_people_counter.py
|
Seagor/Web-Cam-Counter
|
d80d12f713d0cf089a203c427d2f930b2f4716dd
|
[
"MIT"
] | null | null | null |
steamboat_people_counter.py
|
Seagor/Web-Cam-Counter
|
d80d12f713d0cf089a203c427d2f930b2f4716dd
|
[
"MIT"
] | null | null | null |
import base64 as b64
import numpy as np
import cv2
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed
from IPython.display import Image, display
import ipywidgets as widgets
def convert_media_to_image(media):
arr = np.asarray(bytearray(b64.b64decode(media["__data__"])), dtype=np.uint8)
return cv2.imdecode(arr,-1)
history = 50
nGauss = 2
bgThresh = 0.75
noise = 20
fgbg = cv2.BackgroundSubtractorMOG(history, nGauss, bgThresh, noise)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(4,4))
def preprocess(image, ref):
fgmask = fgbg.apply(image)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
return fgmask
def display_image(image):
_, png_image = cv2.imencode(".png", image)
display(Image(data=png_image.tostring()))
images = [convert_media_to_image(rec["media"]) for oid, rec in data]
shape = list(images[0].shape)
shape.append(min(len(images), 100))
avg = np.zeros(shape)
for z, image in enumerate(images[:100]):
avg[:,:,:,z] = image
avg = np.uint8(np.average(avg, axis=3))
avg = cv2.merge([avg[:,:,0],avg[:,:,1],avg[:,:,2]])
fgbg.apply(avg)
preprocessed = [preprocess(img, avg) for img in images]
cv2.imwrite("avg.jpg", avg)
@interact(idx=widgets.IntSlider(min=0,max=len(images)-1,step=1,value=0))
def display_contours(idx):
contours, _ = cv2.findContours(preprocessed[idx],cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
good_contours = [c for c, a in zip(contours, areas) if a < 200]
cv2.drawContours(images[idx], good_contours, -1, (0, 0, 255))
display_image(images[idx])
    print("%i 'people' found" % len(good_contours))
| 34.244898
| 95
| 0.718117
|
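A hedged sketch of the same background-subtraction and contour-counting idea as the script above, written against the modern OpenCV (3.x/4.x) API; the frame source and the area threshold are illustrative choices, not taken from the source.
import cv2

fgbg = cv2.createBackgroundSubtractorMOG2(history=50, varThreshold=16, detectShadows=False)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))


def count_blobs(frame, max_area=200):
    # Foreground mask, then an opening to remove speckle noise.
    fgmask = fgbg.apply(frame)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    # findContours returns (contours, hierarchy) in OpenCV 4.x.
    contours, _ = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return sum(1 for c in contours if cv2.contourArea(c) < max_area)

# Example frame source (path is illustrative):
# cap = cv2.VideoCapture("webcam.mp4")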
d95ecde2f52aa90a97808a7160e04c6c2ad80665
| 624
|
py
|
Python
|
pythonx/ncm2_sorter/abbrfuzzy.py
|
ssmithstone/ncm2
|
bbb6fe7b7ab05a93627755ccc1dc134be99681c7
|
[
"MIT"
] | 1,448
|
2018-06-29T02:29:20.000Z
|
2022-03-29T04:16:43.000Z
|
pythonx/ncm2_sorter/abbrfuzzy.py
|
ssmithstone/ncm2
|
bbb6fe7b7ab05a93627755ccc1dc134be99681c7
|
[
"MIT"
] | 188
|
2018-06-29T06:30:58.000Z
|
2022-03-17T11:00:57.000Z
|
pythonx/ncm2_sorter/abbrfuzzy.py
|
ssmithstone/ncm2
|
bbb6fe7b7ab05a93627755ccc1dc134be99681c7
|
[
"MIT"
] | 55
|
2018-07-11T13:54:21.000Z
|
2022-03-16T23:32:11.000Z
|
import sys
def Sorter(**kargs):
def key(e):
w = e['word']
ud = e['user_data']
hl = ud['match_highlight']
# prefer less pieces
pieces = len(hl)
# prefer earlier match
first_match = sys.maxsize
if len(hl):
first_match = hl[0][0]
# prefer shorter span
span = sys.maxsize
if len(hl):
span = hl[-1][1] - hl[0][0]
# alphanum
scw = w.swapcase()
return [pieces, first_match, span, scw]
def sort(matches: list):
matches.sort(key=key)
return matches
return sort
| 19.5
| 47
| 0.501603
|
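A hedged usage sketch for Sorter() above. In ncm2 the match_highlight spans come from the matcher; the two entries below are hand-made to show that fewer highlight pieces sort first. The import assumes the pythonx directory is on sys.path.
from ncm2_sorter.abbrfuzzy import Sorter

matches = [
    {'word': 'foo_bar', 'user_data': {'match_highlight': [(0, 3), (4, 7)]}},
    {'word': 'foobar',  'user_data': {'match_highlight': [(0, 6)]}},
]

sort = Sorter()
print([m['word'] for m in sort(matches)])  # -> ['foobar', 'foo_bar']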
f0385f56b6fdeab01a9c093edce257d56bf37353
| 2,531
|
py
|
Python
|
build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/IPython/kernel/core/message_cache.py
|
lumanjiao/XLS_BigData
|
2c4c37872b8636df1c8b0e005bc12a635a753c7a
|
[
"Apache-2.0"
] | 11
|
2019-03-20T07:38:35.000Z
|
2021-06-18T09:42:46.000Z
|
IPython/kernel/core/message_cache.py
|
08saikiranreddy/ipython
|
3498382180ad409592f46a9dd0d190ca917bfbff
|
[
"BSD-3-Clause-Clear"
] | 1
|
2015-07-16T22:26:53.000Z
|
2015-07-16T22:26:53.000Z
|
IPython/kernel/core/message_cache.py
|
08saikiranreddy/ipython
|
3498382180ad409592f46a9dd0d190ca917bfbff
|
[
"BSD-3-Clause-Clear"
] | 5
|
2019-06-29T03:13:02.000Z
|
2020-04-23T04:47:11.000Z
|
# encoding: utf-8
"""Storage for the responses from the interpreter."""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
class IMessageCache(object):
""" Storage for the response from the interpreter.
"""
def add_message(self, i, message):
""" Add a message dictionary to the cache.
Parameters
----------
i : int
message : dict
"""
def get_message(self, i=None):
""" Get the message from the cache.
Parameters
----------
i : int, optional
The number of the message. If not provided, return the
highest-numbered message.
Returns
-------
message : dict
Raises
------
IndexError if the message does not exist in the cache.
"""
class SimpleMessageCache(object):
""" Simple dictionary-based, in-memory storage of the responses from the
interpreter.
"""
def __init__(self):
self.cache = {}
def add_message(self, i, message):
""" Add a message dictionary to the cache.
Parameters
----------
i : int
message : dict
"""
self.cache[i] = message
def get_message(self, i=None):
""" Get the message from the cache.
Parameters
----------
i : int, optional
The number of the message. If not provided, return the
highest-numbered message.
Returns
-------
message : dict
Raises
------
IndexError if the message does not exist in the cache.
"""
if i is None:
keys = self.cache.keys()
if len(keys) == 0:
raise IndexError("index %r out of range" % i)
else:
i = max(self.cache.keys())
try:
return self.cache[i]
except KeyError:
# IndexError is more appropriate, here.
raise IndexError("index %r out of range" % i)
| 25.565657
| 80
| 0.468985
|
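A short usage sketch for SimpleMessageCache above; the import path is the legacy IPython module path shown in the row and is only valid for that vintage of IPython.
from IPython.kernel.core.message_cache import SimpleMessageCache

cache = SimpleMessageCache()
cache.add_message(1, {'output': 'a'})
cache.add_message(2, {'output': 'b'})
print(cache.get_message())   # -> {'output': 'b'}, the highest-numbered message
print(cache.get_message(1))  # -> {'output': 'a'}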
d331519454ab5c9c04c0db49d3c20a04ba57c6f1
| 15,033
|
py
|
Python
|
src/scripts/swagger_client/api/environment_api.py
|
shipyardbuild/circleci-orb
|
5cc1393aac44ff62b95db5ec725702d8a7dbb216
|
[
"MIT"
] | null | null | null |
src/scripts/swagger_client/api/environment_api.py
|
shipyardbuild/circleci-orb
|
5cc1393aac44ff62b95db5ec725702d8a7dbb216
|
[
"MIT"
] | 4
|
2021-09-01T21:15:02.000Z
|
2022-01-04T18:37:48.000Z
|
src/scripts/swagger_client/api/environment_api.py
|
shipyardbuild/circleci-orb
|
5cc1393aac44ff62b95db5ec725702d8a7dbb216
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Shipyard API
The official OpenAPI spec for the Shipyard API. # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class EnvironmentApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_environment(self, uuid, **kwargs): # noqa: E501
"""Get an environment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_environment(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str uuid: the environment's identifying UUID (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_environment_with_http_info(uuid, **kwargs) # noqa: E501
else:
(data) = self.get_environment_with_http_info(uuid, **kwargs) # noqa: E501
return data
def get_environment_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get an environment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_environment_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str uuid: the environment's identifying UUID (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_environment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in params or
params['uuid'] is None):
raise ValueError("Missing the required parameter `uuid` when calling `get_environment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in params:
path_params['uuid'] = params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/environment/{uuid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_environments(self, **kwargs): # noqa: E501
"""List your organization's environments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_environments(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name:
:param str org_name:
:param str repo_name:
:param str branch:
:param int pull_request_number:
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_environments_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_environments_with_http_info(**kwargs) # noqa: E501
return data
def list_environments_with_http_info(self, **kwargs): # noqa: E501
"""List your organization's environments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_environments_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name:
:param str org_name:
:param str repo_name:
:param str branch:
:param int pull_request_number:
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'org_name', 'repo_name', 'branch', 'pull_request_number'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_environments" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'org_name' in params:
query_params.append(('org_name', params['org_name'])) # noqa: E501
if 'repo_name' in params:
query_params.append(('repo_name', params['repo_name'])) # noqa: E501
if 'branch' in params:
query_params.append(('branch', params['branch'])) # noqa: E501
if 'pull_request_number' in params:
query_params.append(('pull_request_number', params['pull_request_number'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/environment', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def restart_environment(self, uuid, **kwargs): # noqa: E501
"""Restart an environment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.restart_environment(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str uuid: the environment's identifying UUID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.restart_environment_with_http_info(uuid, **kwargs) # noqa: E501
else:
(data) = self.restart_environment_with_http_info(uuid, **kwargs) # noqa: E501
return data
def restart_environment_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Restart an environment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.restart_environment_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str uuid: the environment's identifying UUID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method restart_environment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in params or
params['uuid'] is None):
raise ValueError("Missing the required parameter `uuid` when calling `restart_environment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in params:
path_params['uuid'] = params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/environment/{uuid}/restart', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def stop_environment(self, uuid, **kwargs): # noqa: E501
"""Stop an environment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stop_environment(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str uuid: the environment's identifying UUID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.stop_environment_with_http_info(uuid, **kwargs) # noqa: E501
else:
(data) = self.stop_environment_with_http_info(uuid, **kwargs) # noqa: E501
return data
def stop_environment_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Stop an environment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stop_environment_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str uuid: the environment's identifying UUID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method stop_environment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in params or
params['uuid'] is None):
raise ValueError("Missing the required parameter `uuid` when calling `stop_environment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in params:
path_params['uuid'] = params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/environment/{uuid}/stop', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 36.665854
| 118
| 0.601211
|
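A hedged usage sketch for the generated client above, following the usual swagger-codegen Python layout. The Configuration class and its api_key dict are standard for that generator, but the header name 'x-api-token' and the token value are assumptions, not documented Shipyard specifics.
import swagger_client
from swagger_client.api.environment_api import EnvironmentApi

configuration = swagger_client.Configuration()
# The header name used for ApiKeyAuth is an assumption; check the generated
# configuration or the Shipyard docs for the real one.
configuration.api_key['x-api-token'] = 'YOUR_API_TOKEN'

api = EnvironmentApi(swagger_client.ApiClient(configuration))
environments = api.list_environments(org_name='my-org', branch='main')
print(environments)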
7c03816424ba348f3cfd072504c4963468046db8
| 3,333
|
py
|
Python
|
simulations/old/analysis/plot-performance.py
|
hawkrobe/fish
|
2000e46c397f7c95bba8ecb0c6afd26013929ff8
|
[
"MIT"
] | 1
|
2015-12-11T16:51:08.000Z
|
2015-12-11T16:51:08.000Z
|
simulations/old/analysis/plot-performance.py
|
hawkrobe/fish
|
2000e46c397f7c95bba8ecb0c6afd26013929ff8
|
[
"MIT"
] | 3
|
2020-02-11T21:36:11.000Z
|
2020-11-01T21:25:17.000Z
|
simulations/old/analysis/plot-performance.py
|
hawkrobe/couzin_replication
|
ff491639954f0652d6b4b2a318477bb54c38fadf
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from parse import *
sys.path.append("../utils/")
from utils import *
subset_noise = False
noise_level = '1-2en01'
subset_difficulty = True
diff_level = '1en01'
data_dir = '../../out/'
games = []
games += get_games(data_dir, 'experiment')
#games += ['tmp']
data = get_data(data_dir, games)
#data = data[data['n_players'] < 6]
#data = data[data['score'] > 0.7]
if subset_noise:
data = data[data['noise'] == noise_level]
if subset_difficulty:
data = data[data['difficulty'] == diff_level]
def get_shape(i):
shapes = ['$a$',
'$b$',
'$c$',
'$d$',
'$e$',
'$f$',
'$g$',
'$h$',
'$i$',
'$j$',
'$k$',
'$l$',
'$m$',
'$n$',
'$o$',
'$p$',
'$q$',
'$r$',
'$s$',
'$t$',
'$u$',
'$v$',
'$w$',
'$x$',
'$y$',
'$z$',
(4,0,0), (3,0,0), (4,0,45), (0,3,0), (3,0,90),
(3,0,180), (3,0,270),
(5,0,0), (6,0,0), (6,0,90), (4,2,0), (4,3,0),
(5,2,0), (5,3,0),
"d", "*", "$a$"]
return shapes[i % len(shapes)]
plt.rcParams.update(pd.tools.plotting.mpl_stylesheet)
fig, ax = plt.subplots()
ax.margins(0.05)
colors = ['#348ABD', '#467821', '#E4C334', '#A60628']
counts = dict([(n,0) for n in set(data['n_players'])])
data['nn_players'] = data['n_players'] + np.random.randn(len(data))*0.1
i = 0
for noise in set(data['noise']):
if not subset_noise or noise == noise_level:
noise_sub = data[data['noise'] == noise]
x = sorted(set(noise_sub['n_players']))
y = []
for n in x:
y += [np.mean(noise_sub[noise_sub['n_players'] == n]['score'])]
ax.plot(x, y, c = colors[i], lw = 10, alpha = 0.5)
i += 1
i = 0
j = 0
for noise in set(data['noise']):
if not subset_noise or noise == noise_level:
noise_sub = data[data['noise'] == noise]
for game in set(noise_sub['game']):
sub = noise_sub[noise_sub['game'] == game]
n = list(set(sub['n_players']))[0]
ax.plot(sub['nn_players'], sub['score'], marker='o', linestyle='', ms = 20, c = colors[i])
ax.plot(sub['nn_players'], sub['score'], marker=get_shape(counts[n]), linestyle='', ms = 10, c = 'black')
counts[n] += 1
i += 1
#legend = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), title = 'Background', numpoints=1)
#legend.get_title().set_fontsize('30')
#plt.setp(plt.gca().get_legend().get_texts(), fontsize='20')
plt.xlabel('Number of Players', fontsize=50)
plt.ylabel('Individual Score', fontsize=50)
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
if subset_difficulty:
fig.savefig('../../plots/performance-' + diff_level + '.png',dpi=100)
elif subset_noise:
fig.savefig('../../plots/performance-' + noise_level + '.png',dpi=100)
else:
fig.savefig('../../plots/performance-all.png',dpi=100)
#plt.show()
| 26.879032
| 122
| 0.50045
|
55282e4ce1304bfb626ecca8d5812bc05fb8aebe
| 449
|
py
|
Python
|
networkapi/api_aws/serializers.py
|
vinicius-marinho/GloboNetworkAPI
|
94651d3b4dd180769bc40ec966814f3427ccfb5b
|
[
"Apache-2.0"
] | 73
|
2015-04-13T17:56:11.000Z
|
2022-03-24T06:13:07.000Z
|
networkapi/api_aws/serializers.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | 99
|
2015-04-03T01:04:46.000Z
|
2021-10-03T23:24:48.000Z
|
networkapi/api_aws/serializers.py
|
shildenbrand/GloboNetworkAPI
|
515d5e961456cee657c08c275faa1b69b7452719
|
[
"Apache-2.0"
] | 64
|
2015-08-05T21:26:29.000Z
|
2022-03-22T01:06:28.000Z
|
# -*- coding: utf-8 -*-
from django.db.models import get_model
from networkapi.util.serializers import DynamicFieldsModelSerializer
class AwsVPCSerializer(DynamicFieldsModelSerializer):
class Meta:
VPC = get_model('api_aws', 'VPC')
depth = 1
model = VPC
fields = (
'id',
'vpc'
)
default_fields = fields
basic_fields = fields
details_fields = fields
| 18.708333
| 68
| 0.599109
|
306ad0387bde92138f9134bd63b9b8cefa199ff0
| 91
|
py
|
Python
|
app/marksapp/apps.py
|
CFelipe/marks
|
cc7e42eca9599f7e9df96f93c764faadf15f9bea
|
[
"MIT"
] | 21
|
2019-05-13T12:54:47.000Z
|
2022-01-10T19:51:26.000Z
|
app/marksapp/apps.py
|
CFelipe/marks
|
cc7e42eca9599f7e9df96f93c764faadf15f9bea
|
[
"MIT"
] | 19
|
2018-11-18T20:10:09.000Z
|
2019-11-16T02:47:38.000Z
|
app/marksapp/apps.py
|
CFelipe/marks
|
cc7e42eca9599f7e9df96f93c764faadf15f9bea
|
[
"MIT"
] | 1
|
2022-02-23T16:23:06.000Z
|
2022-02-23T16:23:06.000Z
|
from django.apps import AppConfig
class MarksappConfig(AppConfig):
name = "marksapp"
| 15.166667
| 33
| 0.758242
|
07d9e9a8ac8f7478dbbf9bacce693eb55d514424
| 3,045
|
py
|
Python
|
tests/performance.py
|
redmoogle/jsonreader
|
b37db693a1f382661c44cf756e5ac7a6a3953010
|
[
"MIT"
] | null | null | null |
tests/performance.py
|
redmoogle/jsonreader
|
b37db693a1f382661c44cf756e5ac7a6a3953010
|
[
"MIT"
] | null | null | null |
tests/performance.py
|
redmoogle/jsonreader
|
b37db693a1f382661c44cf756e5ac7a6a3953010
|
[
"MIT"
] | 1
|
2021-04-13T15:09:58.000Z
|
2021-04-13T15:09:58.000Z
|
"""
Ensures it can read and write to the file
"""
import guildreader
import unittest
import random
import time
class FakeGuild:
def __init__(self):
self.id = random.randint(1, 999999999)
class FakeBot:
def __init__(self):
self.guilds = []
for _ in range(1000):
self.guilds += [FakeGuild()]
# noinspection PyAttributeOutsideInit
class Performance(unittest.TestCase):
def setUp(self):
self.bot = FakeBot()
self.delta = time.time()
def step1(self):
delta = time.time()
guildreader.create_file(self.bot, "test", {"A": 10, "B": [1, 2, 3, 4]}, wipe=True)
print(f'Creation Time: {time.time()-delta} seconds...')
def step2(self):
delta = time.time()
for guild in self.bot.guilds:
data = guildreader.read_file(guild.id, "test")
assert data["A"] == 10
assert data["B"] == [1, 2, 3, 4]
print(f'Full Read Time: {time.time()-delta} seconds...')
def step3(self):
delta = time.time()
for guild in self.bot.guilds:
data = guildreader.read_file(guild.id, "test")
data["A"] = 80
data["B"] = [1, 2, 3, 4, 5, 6, 8, 9, 10]
guildreader.write_file(guild.id, "test", data)
print(f'Full Write Time: {time.time()-delta} seconds...')
def step4(self):
delta = time.time()
for guild in self.bot.guilds:
data = guildreader.read_file(guild.id, "test")
assert data["A"] == 80
assert data["B"] == [1, 2, 3, 4, 5, 6, 8, 9, 10]
print(f'Full Reread Time: {time.time()-delta} seconds...')
def step5(self):
delta = time.time()
guildreader.create_file(self.bot, "test", {"A": 10, "B": [1, 2, 3, 4]}, wipe=True)
print(f'Recreation Time: {time.time()-delta} seconds...')
def step6(self):
delta = time.time()
data = guildreader.read_file(self.bot.guilds[0].id, "test")
assert data["A"] == 10
assert data["B"] == [1, 2, 3, 4]
print(f'Single Read Time: {time.time() - delta} seconds...')
def step7(self):
delta = time.time()
data = guildreader.read_file(self.bot.guilds[0].id, "test")
data["A"] = 80
data["B"] = [1, 2, 3, 4, 5, 6, 8, 9, 10]
guildreader.write_file(self.bot.guilds[0].id, "test", data)
print(f'Single Write Time: {time.time() - delta} seconds...')
def step8(self):
delta = time.time()
data = guildreader.read_file(self.bot.guilds[0].id, "test")
assert data["A"] == 80
assert data["B"] == [1, 2, 3, 4, 5, 6, 8, 9, 10]
print(f'Single Reread Time: {time.time() - delta} seconds...')
def _steps(self):
for name in dir(self): # dir() result is implicitly sorted
if name.startswith("step"):
yield name, getattr(self, name)
def test_steps(self):
for name, step in self._steps():
step()
if __name__ == '__main__':
unittest.main()
| 31.071429
| 90
| 0.548112
|