hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5649e4023138ee826410147fce1ccfbb2299a135 | 22,538 | py | Python | type_erasure/code.py | lubkoll/friendly-type-erasure | 719830233a8652ccf18164653b466b0054a617f6 | ["MIT"] | null | null | null | type_erasure/code.py | lubkoll/friendly-type-erasure | 719830233a8652ccf18164653b466b0054a617f6 | ["MIT"] | 22 | 2016-08-03T16:51:10.000Z | 2016-11-23T20:53:03.000Z | type_erasure/code.py | lubkoll/friendly-type-erasure | 719830233a8652ccf18164653b466b0054a617f6 | ["MIT"] | null | null | null |
import cpp_file_parser
import parser_addition
import util
####################################################################
# Apply std::decay
####################################################################
def get_decayed(type_):
return 'typename std :: decay < ' + type_ + ' > :: type'
def get_handle_return_type(data, classname):
if data.copy_on_write:
if data.small_buffer_optimization:
return 'std :: shared_ptr < HandleBase < ' + data.interface_type + ' , Buffer > > '
else:
return 'std :: shared_ptr < HandleBase < ' + data.interface_type + ' > > '
else:
return classname + ' * '
def get_generator(data, classname):
if data.copy_on_write:
return 'std :: make_shared < typename std :: decay < ' + classname + ' > :: type >'
else:
return 'new typename std :: decay < ' + classname + ' > :: type'
static_reference_check = 'typename std :: enable_if < std :: is_same < typename std :: remove_const < T > :: type , ' + \
get_decayed('U') + ' & > :: value > :: type * = nullptr '
def get_static_value_check(first_type, second_type):
return 'typename std :: enable_if < std :: is_same < ' + first_type + ' , ' + get_decayed(second_type) + \
' > :: value > :: type * = nullptr'
def enable_if_not_same(first_type, second_type):
return 'typename std :: enable_if < ! std :: is_same < ' + first_type + ' , ' + get_decayed(second_type) + \
' > :: value > :: type * = nullptr'
noexcept_if_nothrow_constructible = 'noexcept ( type_erasure_detail :: is_nothrow_constructible < U > ( ) )'
def get_default_default_constructor(classname, noexcept='', constexpr=''):
return (constexpr and constexpr + ' ' or '') + classname + ' ( ) ' + (noexcept and noexcept + ' ' or '') + '= default ;'
def enable_if_not_same_and_compatible(classname, second_type, detail_namespace):
decayed_type = get_decayed(second_type)
return 'typename std :: enable_if < ' + detail_namespace + ' :: ' + classname + 'Concept < ' + classname + \
' , ' + decayed_type + ' > :: value > :: type * = nullptr'
def get_constructor_from_value_declaration(classname, detail_namespace=''):
if detail_namespace:
return 'template < typename T , ' + enable_if_not_same_and_compatible(classname, 'T',
detail_namespace) + ' > ' + classname + ' ( T && value ) '
return 'template < typename T , ' + enable_if_not_same(classname, 'T') + ' > ' + classname + ' ( T && value ) '
def get_handle_constructor_body_for_small_buffer_optimization(data, clone_into):
return '' + data.impl_member + ' = type_erasure_detail :: ' + clone_into + ' < HandleBase , StackAllocatedHandle < ' + get_decayed('T') + \
' > , HeapAllocatedHandle < ' + get_decayed('T') + ' > > ( std :: forward < T > ( value ) , buffer_ ) ; '
def get_handle_constructor(data, classname, handle_namespace):
constructor = get_constructor_from_value_declaration(classname)
if data.small_buffer_optimization:
clone_into = 'clone_into_shared_ptr' if data.copy_on_write else 'clone_into'
constructor += '{ ' + get_handle_constructor_body_for_small_buffer_optimization(data, clone_into) + '}'
else:
constructor += ': ' + data.impl_member + ' ( ' + \
get_generator(data, handle_namespace + ' :: Handle < ' + get_decayed('T') + ' , ' + classname + ' > ') + ' '
constructor += '( std :: forward < T > ( value ) ) ) { }'
return constructor
def get_assignment_from_value(data, classname, detail_namespace):
code = 'template < typename T , '
if data.table:
code += enable_if_not_same_and_compatible(classname, 'T', detail_namespace)
else:
code += enable_if_not_same(classname, 'T')
code += ' > '
code += classname + ' & operator= ( T && value ) { '
if data.table:
return code + 'return * this = ' + classname + ' ( std :: forward < T > ( value ) ) ; }'
if data.small_buffer_optimization:
clone_into = 'clone_into_shared_ptr' if data.copy_on_write else 'clone_into'
if not data.copy_on_write:
code += 'reset ( ) ; '
code += get_handle_constructor_body_for_small_buffer_optimization(data, clone_into)
else:
if data.copy_on_write:
code += data.impl_member + ' = '
else:
code += '' + data.impl_member + ' . reset ( '
code += get_generator(data, detail_namespace + ' :: Handle < ' + get_decayed('T') + ' , ' + classname + ' > ')
code += ' ( std :: forward < T > ( value ) ) '
if not data.copy_on_write:
code += ') '
code += '; '
return code + 'return * this ; }'
def get_handle_copy_assignment_for_small_buffer_optimization(data):
return data.impl_member + ' = other . ' + data.impl_member + ' ? other . ' + data.impl_member + ' -> clone_into ( buffer_ ) : nullptr ; '
def get_cast_to_handle_base(buffer):
return 'static_cast < HandleBase * >( static_cast < void * > ( ' + buffer + ' ) )'
def get_handle_move_assignment_for_small_buffer_optimization(data, escape_sequence):
return escape_sequence + 'if ( type_erasure_detail :: is_heap_allocated ( other . ' + data.impl_member + ' , other . buffer_ ) ) ' + \
data.impl_member + ' = other . ' + data.impl_member + ' ; else { buffer_ = other.buffer_ ; ' + \
data.impl_member + ' = ' + get_cast_to_handle_base('& buffer_') + ' ; } '
def get_copy_constructor_for_table(data, classname):
declaration = classname + ' ( const ' + classname + ' & other ) : ' + data.function_table_member + ' ( other . '
declaration += data.function_table_member + ' ) '
if not data.no_rtti:
declaration += ', type_id_ ( other . type_id_ ) '
if data.small_buffer_optimization:
if data.copy_on_write:
declaration += ', ' + data.impl_member + ' ( other . ' + data.impl_member + ' ) { '
declaration += 'if ( !type_erasure_table_detail :: is_heap_allocated ( other . ' + data.impl_member + ' . get ( ) , other . buffer_ ) ) '
return declaration + 'other . functions_ . clone_into ( other . impl_ . get ( ) , buffer_ , impl_ ) ; }'
else:
return declaration + '{ ' + data.impl_member + ' = other . clone_into ( buffer_ ) ; }'
else:
declaration += ' , ' + data.impl_member + ' ( other . ' + data.impl_member + ' ? other . ' + \
data.function_table_member + ' . clone ( other . ' + data.impl_member + ' ) '
return declaration + ': nullptr ) { }'
def get_copy_constructor_for_handle(data, classname):
declaration = classname + ' ( const ' + classname + ' & other ) '
if data.small_buffer_optimization:
return declaration + '{ ' + get_handle_copy_assignment_for_small_buffer_optimization(data) + '}'
return declaration + ': ' + data.impl_member + ' ( other . ' + data.impl_member + ' ? other . ' + data.impl_member + ' -> clone ( ) : nullptr ) { }'
def get_pimpl_copy_constructor(data, classname, private_classname, member):
declaration = classname + ' ( const ' + classname + ' & other ) '
if data.small_buffer_optimization:
return declaration + '{ ' + get_handle_copy_assignment_for_small_buffer_optimization(data) + '}'
return declaration + ': ' + member + ' ( other . ' + member + ' ? new ' + private_classname + ' ( * other . pimpl_ ) : ' \
'nullptr ) { }'
def get_pimpl_move_constructor(data, classname, member):
declaration = classname + ' ( ' + classname + ' && other ) '
if data.small_buffer_optimization:
return declaration + '{ ' + get_handle_copy_assignment_for_small_buffer_optimization(data) + '}'
return declaration + ': ' + member + ' ( std::move( other ) . ' + member + ' ) { }'
def get_pimpl_copy_assignment(data, classname, private_classname, member):
declaration = classname + ' & ' + 'operator = ( const ' + classname + ' & other ) { '
if data.small_buffer_optimization:
declaration += get_handle_copy_assignment_for_small_buffer_optimization(data)
declaration += 'if ( other . ' + member + ' ) '
return declaration + member + ' . reset ( new ' + private_classname + '( * other . pimpl_ ) ) ; ' \
'else pimpl_ = nullptr ; return * this ; }'
def get_pimpl_move_assignment(data, classname, member):
declaration = classname + ' & ' + 'operator = ( ' + classname + ' && other ) { '
if data.small_buffer_optimization:
declaration += get_handle_copy_assignment_for_small_buffer_optimization(data)
return declaration + member + ' = std::move( other ) . ' + member + ' ; return * this ; }'
def get_copy_constructor(data, classname):
return get_copy_constructor_for_table(data, classname) if data.table \
else get_copy_constructor_for_handle(data, classname)
def get_move_constructor_for_table(data, classname):
declaration = classname + ' ( ' + classname + ' && other ) noexcept : '
declaration += data.function_table_member + ' ( other . ' + data.function_table_member + ' ) '
if not data.no_rtti:
declaration += ', type_id_ ( other . type_id_ ) '
if data.small_buffer_optimization:
if data.copy_on_write:
declaration += '{ if ( type_erasure_table_detail :: is_heap_allocated ( other . ' + data.impl_member + ' . get ( ) , '
declaration += 'other . buffer_ ) ) ' + data.impl_member + ' = std :: move ( other . ' + data.impl_member + ' ) ;'
declaration += 'else other . ' + data.function_table_member + ' . clone_into ( other . ' + data.impl_member + ' . get ( ) , '
return declaration + ' buffer_ , ' + data.impl_member + ' ) ; other . ' + data.impl_member + ' = nullptr ; }'
else:
declaration += '{ if ( ! other . ' + data.impl_member + ' ) { ' + data.impl_member + ' = nullptr ; return ; } '
declaration += 'if ( type_erasure_table_detail :: is_heap_allocated ( other . ' + data.impl_member + ' , other . buffer_ ) ) '
declaration += data.impl_member + ' = other . ' + data.impl_member + ' ; '
declaration += 'else { buffer_ = std :: move ( other . buffer_ ) ; ' + data.impl_member + ' = & buffer_ ; } '
return declaration + 'other . ' + data.impl_member + ' = nullptr ; }'
else:
declaration += ' , ' + data.impl_member + ' ( other . ' + data.impl_member + ' ) '
return declaration + '{ other . ' + data.impl_member + ' = nullptr ; }'
def get_move_constructor_for_handle(data, classname):
declaration = classname + ' ( ' + classname + ' && other ) noexcept '
if data.small_buffer_optimization:
escape_sequence = 'if ( ! other . ' + data.impl_member + ' ) return ; '
declaration += '{ ' + get_handle_move_assignment_for_small_buffer_optimization(data, escape_sequence)
else:
declaration += ': ' + data.impl_member + ' ( std :: move ( other.' + data.impl_member + ' ) ) { '
return declaration + 'other . ' + data.impl_member + ' = nullptr ; }'
def get_move_constructor(data, classname):
return get_move_constructor_for_table(data, classname) if data.table \
else get_move_constructor_for_handle(data, classname)
def get_copy_operator_for_handle(data, classname):
declaration = classname + ' & operator = ( const ' + classname + ' & other ) '
if data.small_buffer_optimization:
declaration += '{ ' + get_handle_copy_assignment_for_small_buffer_optimization(data)
else:
declaration += '{ ' + data.impl_member + ' . reset ( other . ' + data.impl_member + ' ? other . ' + data.impl_member + ' -> clone ( ) : nullptr ) ; '
return declaration + 'return * this ; }'
def get_copy_operator_for_table(data, classname):
declaration = classname + ' & operator = ( const ' + classname + ' & other ) { '
if not data.copy_on_write:
declaration += 'reset ( ) ; '
declaration += data.function_table_member + ' = other . ' + data.function_table_member + ' ; '
if not data.no_rtti:
declaration += 'type_id_ = other . type_id_ ; '
if data.small_buffer_optimization:
if data.copy_on_write:
declaration += data.impl_member + ' = other . ' + data.impl_member + ' ; '
declaration += 'if ( !type_erasure_table_detail :: is_heap_allocated ( other . ' + data.impl_member + ' . get ( ) , other . buffer_ ) ) '
declaration += 'other . functions_ . clone_into ( other . impl_ . get ( ) , buffer_ , impl_ ) ; '
else:
declaration += data.impl_member + ' = other . clone_into ( buffer_ ) ; '
else:
declaration += data.impl_member + ' = other . ' + data.impl_member + ' ? other . ' + data.function_table_member + ' . clone ( other . ' + data.impl_member + ' ) '
declaration += ': nullptr ; '
return declaration + 'return * this ; }'
def get_copy_operator(data, classname):
return get_copy_operator_for_table(data, classname) if data.table \
else get_copy_operator_for_handle(data, classname)
def get_move_operator_for_table(data, classname):
declaration = classname + ' & operator = ( ' + classname + ' && other ) noexcept { '
if not data.copy_on_write:
declaration += 'reset ( ) ; '
if not data.no_rtti:
declaration += 'type_id_ = other . type_id_ ; '
if data.small_buffer_optimization:
if data.copy_on_write:
declaration += data.function_table_member + ' = other . ' + data.function_table_member + ' ; '
declaration += 'if ( type_erasure_table_detail :: is_heap_allocated ( other . ' + data.impl_member + ' . get ( ) , '
declaration += 'other . buffer_ ) ) ' + data.impl_member + ' = std :: move ( other . ' + data.impl_member + ' ) ; '
declaration +='else other . ' + data.function_table_member + ' . clone_into ( other . ' + data.impl_member + ' . get ( ) , '
declaration += 'buffer_ , ' + data.impl_member + ' ) ;'
else:
declaration += 'if ( ! other . ' + data.impl_member + ' ) { ' + data.impl_member + ' = nullptr ; return * this ; } '
declaration += data.function_table_member + ' = other . ' + data.function_table_member + ' ; '
declaration += 'if ( type_erasure_table_detail :: is_heap_allocated ( other . ' + data.impl_member + ' , other . buffer_ ) ) '
declaration += data.impl_member + ' = other . ' + data.impl_member + ' ; '
declaration += 'else { buffer_ = std :: move ( other . buffer_ ) ; ' + data.impl_member + ' = & buffer_ ; } '
else:
declaration += data.function_table_member + ' = other . ' + data.function_table_member + ' ; '
declaration += data.impl_member + ' = other . ' + data.impl_member + ' ; '
declaration += 'other . ' + data.impl_member + ' = nullptr ; '
return declaration + 'return * this ; }'
def get_move_operator_for_handle(data, classname):
declaration = classname + ' & operator = ( ' + classname + ' && other ) noexcept '
if data.small_buffer_optimization:
escape_sequence = 'if ( ! other . ' + data.impl_member + ' ) { ' + data.impl_member + ' = nullptr ; return * this ; }'
declaration += '{ reset ( ) ; ' + get_handle_move_assignment_for_small_buffer_optimization(data, escape_sequence)
else:
declaration += '{ ' + data.impl_member + ' = std :: move ( other . ' + data.impl_member + ' ) ; '
return declaration + 'other . ' + data.impl_member + ' = nullptr ; return * this ; }'
def get_move_operator(data, classname):
return get_move_operator_for_table(data, classname) if data.table \
else get_move_operator_for_handle(data, classname)
####################################################################
# Explicit operator bool
####################################################################
def get_operator_bool_for_member_ptr(member):
return 'explicit operator bool ( ) const noexcept { return ' + member + ' != nullptr ; }'
def get_operator_bool_comment(declaration):
comment = ['/**\n',
' * @brief Checks if the type-erased interface holds an implementation.\n',
' * @return true if an implementation is stored, else false\n',
' */\n']
return parser_addition.Comment(comment, declaration)
####################################################################
# Casts via member function target
####################################################################
def get_cast(data, classname, handle_namespace, const=''):
const = const and const + ' ' or ''
declaration = 'template < class T > ' + const + 'T * target ( ) ' + const + 'noexcept '
impl = ('read ( )' if const else 'write ( )') if data.copy_on_write else data.impl_raw_member
if data.table:
if data.no_rtti:
code = declaration + '{ if ( ! ' + data.impl_member + ' ) return nullptr ; '
return code + 'return type_erasure_table_detail :: cast_impl < T > ( ' + impl + ' ) ; }'
else:
code = declaration + '{ if ( ! ' + data.impl_member + ' ) return nullptr ; '
return code + 'return type_erasure_table_detail :: dynamic_cast_impl < T > ( type_id_ , ' + impl + ' ) ; }'
else:
impl = impl if not data.copy_on_write else '& ' + impl
if data.small_buffer_optimization:
return declaration + '{ if ( type_erasure_detail :: is_heap_allocated ( ' + data.impl_raw_member + ' , buffer_ ) ) ' \
'return type_erasure_detail :: cast < ' + const + 'T , ' + \
const + 'HeapAllocatedHandle < T > > ( ' + impl + ' ) ; ' \
'return type_erasure_detail :: cast < ' + const + 'T , ' + \
const + 'StackAllocatedHandle < T > > ( ' + impl + ' ) ; }'
return declaration + '{ return type_erasure_detail :: cast < ' + const + 'T , ' + \
const + handle_namespace + ' :: Handle < T , ' + classname + ' > > ( ' + impl + ' ) ; }'
def get_handle_cast_comment(declaration, const=''):
comment = ['/**\n',
'* @brief Conversion of the stored implementation to @code ' + const + ' T* @endcode.\n',
'* @return pointer to the stored object if conversion was successful, else nullptr\n',
'*/\n']
return parser_addition.Comment(comment,declaration)
def get_handle_interface_function(data, function):
code = util.concat(function.tokens[:cpp_file_parser.get_declaration_end_index(function.name,function.tokens)],' ')
code += ' { assert ( ' + data.impl_member + ' ) ; ' + function.return_str + ' ' + data.impl_member + ' -> ' + function.name + ' ( '
arguments = cpp_file_parser.get_function_arguments(function)
for arg in arguments:
code += arg.in_single_function_call()
if arg is not arguments[-1]:
code += ' , '
return code + ' ) ; }'
####################################################################
# Handle specialization for std::reference_wrapper
####################################################################
def get_handle_specialization(data):
if data.small_buffer_optimization:
return 'template < class T , class Interface , class Buffer , bool HeapAllocated > ' \
'struct Handle < std :: reference_wrapper < T > , Interface , Buffer , HeapAllocated > : ' \
'Handle < T & , Interface , Buffer , HeapAllocated > { ' \
'Handle ( std :: reference_wrapper < T > ref ) noexcept : ' \
'Handle < T & , Interface , Buffer , HeapAllocated > ( ref . get ( ) ) { } ' \
'};'
return 'template < class T , class Interface > struct Handle < std :: reference_wrapper < T > , Interface > : Handle < T & , Interface > { ' \
'Handle ( std :: reference_wrapper < T > ref ) noexcept : Handle < T & , Interface > ( ref . get ( ) ) { } } ;'
####################################################################
# Copy-on-write: read
####################################################################
def get_read_function_for_handle(return_type, member):
return return_type + ' read ( ) const noexcept { assert ( ' + member + ' ) ; ' \
'return * ' + member + ' ; }'
def get_read_function_for_table(data, return_type):
return return_type + ' read ( ) const noexcept { assert ( ' + data.impl_member + ' ) ; return ' + data.impl_raw_member + ' ; }'
def get_read_function(data, return_type, member):
return get_read_function_for_table(data, return_type) if data.table \
else get_read_function_for_handle(return_type, member)
########################################
# Copy-on-write: write
########################################
def get_write_function_for_handle(data, return_type):
code = return_type + ' write ( ) { assert ( ' + data.impl_member + ' ) ; '
code += 'if ( ! ' + data.impl_member + ' . unique ( ) '
if data.small_buffer_optimization:
code += '&& type_erasure_detail :: is_heap_allocated ( ' + data.impl_raw_member + ' , buffer_ ) '
code += ') '
if data.small_buffer_optimization:
code += data.impl_member + ' = ' + data.impl_member + ' -> clone_into ( buffer_ ) ; '
else:
code += data.impl_member + ' = ' + data.impl_member + ' -> clone ( ) ; '
return code + 'return * ' + data.impl_member + ' ; }'
def get_write_function_for_table(data, return_type):
code = return_type + ' write ( ) { assert ( ' + data.impl_member + ' ) ; '
code += 'if ( ! ' + data.impl_member + ' . unique ( ) '
if data.small_buffer_optimization:
code += '&& type_erasure_table_detail :: is_heap_allocated ( ' + data.impl_raw_member + ' , buffer_ ) '
code += ') '
code += data.function_table_member + ' . clone ( read ( ) , ' + data.impl_member + ' ) ; '
return code + 'return read ( ) ; }'
def get_write_function(data, return_type):
return get_write_function_for_table(data, return_type) if data.table \
else get_write_function_for_handle(data, return_type)
def get_single_function_call(function):
return function.name + ' ( ' + cpp_file_parser.get_function_arguments_in_single_call(function) + ' ) '
| 53.281324 | 170 | 0.583459 |
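A minimal usage sketch for the string-building helpers in type_erasure/code.py above. It assumes the repository root is on sys.path so that type_erasure imports as a package and its sibling modules (cpp_file_parser, parser_addition, util) resolve; the interface name Fooable is a made-up example.

# Hypothetical demo of the token-spaced C++ snippets these helpers emit.
from type_erasure import code

# get_decayed wraps a type in std::decay, with every token separated by spaces.
assert code.get_decayed('T') == 'typename std :: decay < T > :: type'

# Templated constructor declaration for an imaginary interface class 'Fooable'.
print(code.get_constructor_from_value_declaration('Fooable'))
# -> template < typename T , typename std :: enable_if < ! std :: is_same < Fooable ,
#    typename std :: decay < T > :: type > :: value > :: type * = nullptr > Fooable ( T && value )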
94bf53bea1a06ab873fb6647b2e39de384c5dcc3 | 9,798 | py | Python | redash/authentication/__init__.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | ["BSD-2-Clause"] | 65 | 2020-07-17T09:34:42.000Z | 2022-03-25T09:33:32.000Z | redash/authentication/__init__.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | ["BSD-2-Clause"] | 229 | 2021-05-31T16:05:12.000Z | 2022-03-31T19:28:39.000Z | redash/authentication/__init__.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | ["BSD-2-Clause"] | 29 | 2020-08-13T16:02:26.000Z | 2022-02-17T01:31:05.000Z |
import hashlib
import hmac
import logging
import time
from datetime import timedelta
from urllib.parse import urlsplit, urlunsplit
from flask import jsonify, redirect, request, url_for, session
from flask_login import LoginManager, login_user, logout_user, user_logged_in
from redash import models, settings
from redash.authentication import jwt_auth
from redash.authentication.org_resolving import current_org
from redash.settings.organization import settings as org_settings
from redash.tasks import record_event
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.exceptions import Unauthorized
login_manager = LoginManager()
logger = logging.getLogger("authentication")
def get_login_url(external=False, next="/"):
if settings.MULTI_ORG and current_org == None:
login_url = "/"
elif settings.MULTI_ORG:
login_url = url_for(
"redash.login", org_slug=current_org.slug, next=next, _external=external
)
else:
login_url = url_for("redash.login", next=next, _external=external)
return login_url
def sign(key, path, expires):
if not key:
return None
h = hmac.new(key.encode(), msg=path.encode(), digestmod=hashlib.sha1)
h.update(str(expires).encode())
return h.hexdigest()
@login_manager.user_loader
def load_user(user_id_with_identity):
user = api_key_load_user_from_request(request)
if user:
return user
org = current_org._get_current_object()
try:
user_id, _ = user_id_with_identity.split("-")
user = models.User.get_by_id_and_org(user_id, org)
if user.is_disabled or user.get_id() != user_id_with_identity:
return None
return user
except (models.NoResultFound, ValueError, AttributeError):
return None
def request_loader(request):
user = None
if settings.AUTH_TYPE == "hmac":
user = hmac_load_user_from_request(request)
elif settings.AUTH_TYPE == "api_key":
user = api_key_load_user_from_request(request)
else:
logger.warning(
"Unknown authentication type ({}). Using default (HMAC).".format(
settings.AUTH_TYPE
)
)
user = hmac_load_user_from_request(request)
if org_settings["auth_jwt_login_enabled"] and user is None:
user = jwt_token_load_user_from_request(request)
return user
def hmac_load_user_from_request(request):
signature = request.args.get("signature")
expires = float(request.args.get("expires") or 0)
query_id = request.view_args.get("query_id", None)
user_id = request.args.get("user_id", None)
# TODO: 3600 should be a setting
if signature and time.time() < expires <= time.time() + 3600:
if user_id:
user = models.User.query.get(user_id)
calculated_signature = sign(user.api_key, request.path, expires)
if user.api_key and signature == calculated_signature:
return user
if query_id:
query = models.Query.query.filter(models.Query.id == query_id).one()
calculated_signature = sign(query.api_key, request.path, expires)
if query.api_key and signature == calculated_signature:
return models.ApiUser(
query.api_key,
query.org,
list(query.groups.keys()),
name="ApiKey: Query {}".format(query.id),
)
return None
def get_user_from_api_key(api_key, query_id):
if not api_key:
return None
user = None
# TODO: once we switch all api key storage into the ApiKey model, this code will be much simplified
org = current_org._get_current_object()
try:
user = models.User.get_by_api_key_and_org(api_key, org)
if user.is_disabled:
user = None
except models.NoResultFound:
try:
api_key = models.ApiKey.get_by_api_key(api_key)
user = models.ApiUser(api_key, api_key.org, [])
except models.NoResultFound:
if query_id:
query = models.Query.get_by_id_and_org(query_id, org)
if query and query.api_key == api_key:
user = models.ApiUser(
api_key,
query.org,
list(query.groups.keys()),
name="ApiKey: Query {}".format(query.id),
)
return user
def get_api_key_from_request(request):
api_key = request.args.get("api_key", None)
if api_key is not None:
return api_key
if request.headers.get("Authorization"):
auth_header = request.headers.get("Authorization")
api_key = auth_header.replace("Key ", "", 1)
elif request.view_args is not None and request.view_args.get("token"):
api_key = request.view_args["token"]
return api_key
def api_key_load_user_from_request(request):
api_key = get_api_key_from_request(request)
if request.view_args is not None:
query_id = request.view_args.get("query_id", None)
user = get_user_from_api_key(api_key, query_id)
else:
user = None
return user
def jwt_token_load_user_from_request(request):
org = current_org._get_current_object()
payload = None
if org_settings["auth_jwt_auth_cookie_name"]:
jwt_token = request.cookies.get(org_settings["auth_jwt_auth_cookie_name"], None)
elif org_settings["auth_jwt_auth_header_name"]:
jwt_token = request.headers.get(org_settings["auth_jwt_auth_header_name"], None)
else:
return None
if jwt_token:
payload, token_is_valid = jwt_auth.verify_jwt_token(
jwt_token,
expected_issuer=org_settings["auth_jwt_auth_issuer"],
expected_audience=org_settings["auth_jwt_auth_audience"],
algorithms=org_settings["auth_jwt_auth_algorithms"],
public_certs_url=org_settings["auth_jwt_auth_public_certs_url"],
)
if not token_is_valid:
raise Unauthorized("Invalid JWT token")
if not payload:
return
try:
user = models.User.get_by_email_and_org(payload["email"], org)
except models.NoResultFound:
user = create_and_login_user(current_org, payload["email"], payload["email"])
return user
def log_user_logged_in(app, user):
event = {
"org_id": user.org_id,
"user_id": user.id,
"action": "login",
"object_type": "redash",
"timestamp": int(time.time()),
"user_agent": request.user_agent.string,
"ip": request.remote_addr,
}
record_event.delay(event)
@login_manager.unauthorized_handler
def redirect_to_login():
if request.is_xhr or "/api/" in request.path:
response = jsonify(
{"message": "Couldn't find resource. Please login and try again."}
)
response.status_code = 404
return response
login_url = get_login_url(next=request.url, external=False)
return redirect(login_url)
def logout_and_redirect_to_index():
logout_user()
if settings.MULTI_ORG and current_org == None:
index_url = "/"
elif settings.MULTI_ORG:
index_url = url_for("redash.index", org_slug=current_org.slug, _external=False)
else:
index_url = url_for("redash.index", _external=False)
return redirect(index_url)
def init_app(app):
from redash.authentication import (
google_oauth,
saml_auth,
remote_user_auth,
ldap_auth,
)
login_manager.init_app(app)
login_manager.anonymous_user = models.AnonymousUser
login_manager.REMEMBER_COOKIE_DURATION = settings.REMEMBER_COOKIE_DURATION
@app.before_request
def extend_session():
session.permanent = True
app.permanent_session_lifetime = timedelta(seconds=settings.SESSION_EXPIRY_TIME)
from redash.security import csrf
for auth in [google_oauth, saml_auth, remote_user_auth, ldap_auth]:
blueprint = auth.blueprint
csrf.exempt(blueprint)
app.register_blueprint(blueprint)
user_logged_in.connect(log_user_logged_in)
login_manager.request_loader(request_loader)
def create_and_login_user(org, name, email, picture=None):
try:
user_object = models.User.get_by_email_and_org(email, org)
if user_object.is_disabled:
return None
if user_object.is_invitation_pending:
user_object.is_invitation_pending = False
models.db.session.commit()
if user_object.name != name:
logger.debug("Updating user name (%r -> %r)", user_object.name, name)
user_object.name = name
models.db.session.commit()
except NoResultFound:
logger.debug("Creating user object (%r)", name)
user_object = models.User(
org=org,
name=name,
email=email,
is_invitation_pending=False,
_profile_image_url=picture,
group_ids=[org.default_group.id],
)
models.db.session.add(user_object)
models.db.session.commit()
login_user(user_object, remember=True)
return user_object
def get_next_path(unsafe_next_path):
if not unsafe_next_path:
return ""
# Preventing open redirection attacks
parts = list(urlsplit(unsafe_next_path))
parts[0] = "" # clear scheme
parts[1] = "" # clear netloc
safe_next_path = urlunsplit(parts)
# If the original path was a URL, we might end up with an empty
# safe url, which will redirect to the login page. Changing to
# relative root to redirect to the app root after login.
if not safe_next_path:
safe_next_path = "./"
return safe_next_path
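A quick standalone illustration of the open-redirect guard that get_next_path() implements above; the URLs are examples and the helper name is hypothetical.

from urllib.parse import urlsplit, urlunsplit

def strip_scheme_and_netloc(unsafe_next_path):
    # Mirrors the core of get_next_path(): drop scheme and netloc so the redirect stays on-site.
    parts = list(urlsplit(unsafe_next_path))
    parts[0] = ""  # clear scheme
    parts[1] = ""  # clear netloc
    return urlunsplit(parts) or "./"

print(strip_scheme_and_netloc("https://evil.example.com/phish"))  # "/phish"
print(strip_scheme_and_netloc("/queries/42"))                     # "/queries/42"
print(strip_scheme_and_netloc("https://evil.example.com"))        # "./"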
| 30.811321 | 103 | 0.658502 |
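A standalone sketch of the HMAC URL-signing scheme used by sign() and hmac_load_user_from_request() above. The key, path, and user id are placeholders; sign() is reproduced verbatim so the snippet runs without a Redash installation.

import hashlib
import hmac
import time

def sign(key, path, expires):
    # Same as redash.authentication.sign: HMAC-SHA1 over the path, then the expiry timestamp.
    if not key:
        return None
    h = hmac.new(key.encode(), msg=path.encode(), digestmod=hashlib.sha1)
    h.update(str(expires).encode())
    return h.hexdigest()

api_key = "example-user-api-key"    # hypothetical API key
path = "/api/queries/42/results"    # hypothetical request path
expires = time.time() + 600         # the loader accepts expiries up to one hour in the future

signature = sign(api_key, path, expires)
print("?signature={}&expires={}&user_id=1".format(signature, expires))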
f929bfbbcfc9e087e11b38bf2fcf2162ed9710d7 | 13,070 | py | Python | nipyapi/registry/rest.py | iMajna/nipyapi | 5480af8fe8c6b470249837835cb1a067abb6678e | ["Apache-2.0"] | null | null | null | nipyapi/registry/rest.py | iMajna/nipyapi | 5480af8fe8c6b470249837835cb1a067abb6678e | ["Apache-2.0"] | 1 | 2020-03-16T10:02:46.000Z | 2020-03-16T13:37:42.000Z | nipyapi/registry/rest.py | iMajna/nipyapi | 5480af8fe8c6b470249837835cb1a067abb6678e | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.7.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import ssl
import certifi
import logging
import re
# python 2 and python 3 compatibility library
from six import PY3
from six.moves.urllib.parse import urlencode
from .configuration import Configuration
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""
Returns a dictionary of the response headers.
"""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""
Returns a given response header.
"""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, pools_size=4, maxsize=4):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680
# maxsize is the number of requests to host that are allowed in parallel
# ca_certs vs cert_file vs key_file
# http://stackoverflow.com/a/23957365/2985775
# cert_reqs
if Configuration().verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if Configuration().ssl_ca_cert:
ca_certs = Configuration().ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
# cert_file
cert_file = Configuration().cert_file
# key file
key_file = Configuration().key_file
# ssl_context
ssl_context = Configuration().ssl_context
# proxy
proxy = Configuration().proxy
# https pool manager
if proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=cert_file,
key_file=key_file,
ssl_context=ssl_context,
proxy_url=proxy
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=cert_file,
key_file=key_file,
ssl_context=ssl_context
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True, _request_timeout=None):
"""
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if PY3 else (int, long)):
timeout = urllib3.Timeout(total=_request_timeout)
elif isinstance(_request_timeout, tuple) and len(_request_timeout) == 2:
timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body:
request_body = json.dumps(body)
r = self.pool_manager.request(method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded':
r = self.pool_manager.request(method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct Content-Type
# which generated by urllib3 will be overwritten.
del headers['Content-Type']
r = self.pool_manager.request(method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is provided
# in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided arguments.
Please check that your arguments match declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In the python 3, the response.data is bytes.
# we need to decode it to string.
if PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None, _preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""
Custom error messages for exception
"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
| 41.100629 | 127 | 0.532135 |
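A small sketch of how the _request_timeout argument documented in RESTClientObject.request() maps onto urllib3 timeouts; the numbers and URL are arbitrary examples.

import urllib3

# A single number becomes a total-request timeout ...
total_timeout = urllib3.Timeout(total=10)

# ... while a (connect, read) pair sets the two phases separately,
# just like the isinstance branch on _request_timeout above.
split_timeout = urllib3.Timeout(connect=3.05, read=27)

http = urllib3.PoolManager()
resp = http.request("GET", "https://example.org", timeout=split_timeout)
print(resp.status)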
1000e771fabb537dcba061abc39daab9c629fc54 | 673 | py | Python | auth_login/models.py | annevandalfsen/screenbird | 38b70302be3b3dc0c74b6aae8e09666115592aef | ["MIT", "Unlicense"] | 121 | 2015-01-01T23:31:36.000Z | 2021-05-27T04:24:44.000Z | auth_login/models.py | annevandalfsen/screenbird | 38b70302be3b3dc0c74b6aae8e09666115592aef | ["MIT", "Unlicense"] | 1 | 2017-02-08T04:34:14.000Z | 2017-02-08T04:34:14.000Z | auth_login/models.py | annevandalfsen/screenbird | 38b70302be3b3dc0c74b6aae8e09666115592aef | ["MIT", "Unlicense"] | 31 | 2015-01-13T00:23:33.000Z | 2017-05-13T21:50:29.000Z |
from django.db import models
class CustomUserManager(models.Manager):
def create_user(self, username, email):
return self.model._default_manager.create(username=username)
class CustomUser(models.Model):
username = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
objects = CustomUserManager()
def is_authenticated(self):
return True
from social_auth.signals import pre_update
from social_auth.backends.facebook import FacebookBackend
def facebook_extra_values(sender, user, response, details, **kwargs):
return False
pre_update.connect(facebook_extra_values, sender=FacebookBackend)
| 26.92 | 69 | 0.777117 |
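A brief, hedged sketch of exercising the custom manager above; it assumes auth_login is an installed app inside a configured Django project (settings, database and migrations omitted). Note that create_user() accepts an email argument but only persists the username.

from auth_login.models import CustomUser

user = CustomUser.objects.create_user(username="alice", email="alice@example.com")
print(user.username)            # "alice"
print(user.is_authenticated())  # always True for this model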
a5ff72cdfa4ac78318ab6aa62da5433cb4dddff2 | 3,362 | py | Python | examples/benchmark.py | EthanRosenthal/implicit | ad2be694a9da6411732a939f5a959c9856050ae7 | ["MIT"] | 16 | 2016-10-29T13:19:08.000Z | 2022-03-16T14:13:58.000Z | examples/benchmark.py | BHamoudeh/implicit | ad2be694a9da6411732a939f5a959c9856050ae7 | ["MIT"] | null | null | null | examples/benchmark.py | BHamoudeh/implicit | ad2be694a9da6411732a939f5a959c9856050ae7 | ["MIT"] | 12 | 2016-10-25T14:33:26.000Z | 2022-03-21T06:47:14.000Z |
""" A simple benchmark on the last.fm dataset
Compares the running time of this package vs the QMF library from Quora.
On my laptop (2015 Macbook Pro , Dual Core 3.1 GHz Intel Core i7) running
with 50 factors for 15 iterations this is the output:
QMF finished in 547.933080912
Implicit finished in 302.997884989
Implicit is 1.80837262587 times faster
(implicit-mf package was run separately, I estimate it at over 60,000 times
slower on the last.fm dataset - with an estimated running time of around 250 days)
"""
from __future__ import print_function
import logging
import argparse
import time
from subprocess import call
from implicit import alternating_least_squares
from lastfm import read_data, bm25_weight
def benchmark_implicit(matrix, factors, reg, iterations):
start = time.time()
alternating_least_squares(matrix, factors, reg, iterations)
return time.time() - start
def benchmark_qmf(qmfpath, matrix, factors, reg, iterations):
matrix = matrix.tocoo()
datafile = "qmf_data.txt"
open(datafile, "w").write("\n".join("%s %s %s" % vals
for vals in zip(matrix.row, matrix.col, matrix.data)))
def get_qmf_command(nepochs):
return [qmfpath, "--train_dataset", datafile,
"--nfactors", str(factors),
"--confidence_weight", "1",
"--nepochs", str(nepochs),
"--regularization_lambda", str(reg)]
# ok, so QMF needs to read the data in - and including
# that in the timing isn't fair. So run it once with no iterations
# to get a sense of how long reading the input data takes, and
# subtract from the final results
read_start = time.time()
call(get_qmf_command(0))
read_dataset_time = time.time() - read_start
calculate_start = time.time()
call(get_qmf_command(iterations))
return time.time() - calculate_start - read_dataset_time
def run_benchmark(args):
plays = bm25_weight(read_data(args.inputfile)[1])
qmf_time = benchmark_qmf(args.qmfpath, plays, args.factors, args.regularization,
args.iterations)
implicit_time = benchmark_implicit(plays, args.factors, args.regularization, args.iterations)
print("QMF finished in", qmf_time)
print("Implicit finished in", implicit_time)
print("Implicit is %s times faster" % (qmf_time / implicit_time))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generates Benchmark",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', type=str,
dest='inputfile', help='last.fm dataset file')
parser.add_argument('--qmfpath', type=str,
dest='qmfpath', help='full path to qmf wals.bin file', required=True)
parser.add_argument('--factors', type=int, default=50, dest='factors',
help='Number of factors to calculate')
parser.add_argument('--reg', type=float, default=0.8, dest='regularization',
help='regularization weight')
parser.add_argument('--iter', type=int, default=15, dest='iterations',
help='Number of ALS iterations')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
run_benchmark(args)
| 38.204545 | 97 | 0.669244 |
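A hedged example of driving run_benchmark() above programmatically; the dataset and QMF paths are placeholders, and the snippet assumes it runs from the examples/ directory so that benchmark and lastfm import, with the implicit package installed.

import argparse
from benchmark import run_benchmark  # assumed import of examples/benchmark.py

args = argparse.Namespace(
    inputfile="lastfm_dataset.tsv",   # placeholder last.fm dataset file
    qmfpath="/opt/qmf/wals.bin",      # placeholder path to the QMF wals binary
    factors=50,                       # mirrors the argparse defaults defined above
    regularization=0.8,
    iterations=15,
)
run_benchmark(args)                   # runs both timings and prints the speed-up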
cff3030721389d3498f342e9e8ba549468bfdf7e | 2,200 | py | Python | source_code/dataset/outlier_removal.py | sohailhabib/SecurityMetrics | 7de3f462e89d97592e0c28a623bd6f7112b9a3b1 | ["MIT"] | null | null | null | source_code/dataset/outlier_removal.py | sohailhabib/SecurityMetrics | 7de3f462e89d97592e0c28a623bd6f7112b9a3b1 | ["MIT"] | null | null | null | source_code/dataset/outlier_removal.py | sohailhabib/SecurityMetrics | 7de3f462e89d97592e0c28a623bd6f7112b9a3b1 | ["MIT"] | null | null | null |
"""
MIT License
Copyright (c) 2021, Sohail Habib
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------------------------------------------------
Outlier removal
=====================
This class removes outliers whose z score exceeds a given threshold (default threshold is 3)
"""
from dataset.dataset_operation import DatasetOperation
import pandas as pd
from scipy.stats import zscore
import numpy as np
class OutLierRemoval(DatasetOperation):
"""
This class removes the outliers with z score greater than a threshold default threshold is 3
"""
def __init__(self):
"""
Initializes the class object
"""
self.df = pd.DataFrame()
self.z_score_threshold = 3
        self.z_score = None
self.df_out = pd.DataFrame()
return
def operate(self, data, z_score_threshold=3):
self.df = data
self.z_score_threshold = z_score_threshold
        self.z_score = np.abs(zscore(self.df))
        outlier_filter = (self.z_score <= z_score_threshold).all(axis=1)
self.df_out = self.df[outlier_filter]
return self.df_out
| 37.931034 | 120 | 0.695 |
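A self-contained sketch of the z-score filter that OutLierRemoval.operate() applies above. Because the repo-local import dataset.dataset_operation is not available here, the filtering rule is mirrored inline on synthetic data.

import numpy as np
import pandas as pd
from scipy.stats import zscore

rng = np.random.default_rng(0)
df = pd.DataFrame({"x": rng.normal(size=100), "y": rng.normal(size=100)})
df.loc[100] = [50.0, 0.0]  # inject one obvious outlier

# Same rule as OutLierRemoval.operate(): keep rows whose absolute z score
# stays at or below the threshold in every column.
z = np.abs(zscore(df))
filtered = df[(z <= 3).all(axis=1)]
print(df.shape, "->", filtered.shape)  # the injected row is dropped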
5294de90113f0cf8b488ceccdb0e2e94d640e89f | 5,801 | py | Python | tests/test_platform.py | develmaycare/python-commonkit | 329e723cdcc3591cf42ca5a02893c17ec28141c4 | ["BSD-3-Clause"] | null | null | null | tests/test_platform.py | develmaycare/python-commonkit | 329e723cdcc3591cf42ca5a02893c17ec28141c4 | ["BSD-3-Clause"] | 7 | 2020-10-19T17:44:25.000Z | 2021-05-27T22:44:51.000Z | tests/test_platform.py | develmaycare/python-commonkit | 329e723cdcc3591cf42ca5a02893c17ec28141c4 | ["BSD-3-Clause"] | 1 | 2021-06-10T10:42:06.000Z | 2021-06-10T10:42:06.000Z |
import contextlib
import os
from commonkit.platform import Platform
import unittest
# noinspection PyCompatibility
from unittest.mock import MagicMock, patch
# Helpers
@contextlib.contextmanager
def modified_environ(**environ):
"""Temporarily modify environment variables.
:param environ: The modified environment.
"""
old_environ = dict(os.environ)
os.environ.update(environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
# Tests
class TestDarwinPlatform(unittest.TestCase):
def setUp(self):
self.sys = MagicMock()
self.sys.configure_mock(platform="darwin")
def test_get_configuration_path(self):
"""Check the path for persistent application data on OS X."""
platform = Platform(self.sys)
self.assertTrue("Library/Application Support" in platform.get_configuration_path())
def test_get_temp_path(self):
"""Check the path for temporary application data on OS X."""
platform = Platform(self.sys)
self.assertTrue("Library/Caches" in platform.get_temp_path())
def test_is(self):
"""Check that system identification works as expected on OS X."""
platform = Platform(self.sys)
self.assertFalse(platform.is_linux)
self.assertTrue(platform.is_osx)
self.assertFalse(platform.is_windows)
def test_supports_color(self):
"""Check that color support works as expected on OS X."""
platform = Platform(self.sys)
self.assertTrue(platform.supports_color)
class TestLinuxPlatform(unittest.TestCase):
def setUp(self):
self.sys = MagicMock()
self.sys.configure_mock(platform="linux")
def test_get_configuration_path(self):
"""Check the path for persistent application data on Linux."""
platform = Platform(self.sys)
self.assertTrue(".config" in platform.get_configuration_path())
def test_get_temp_path(self):
"""Check the path for temporary application data on Linux."""
platform = Platform(self.sys)
self.assertTrue(".cache" in platform.get_temp_path())
def test_is(self):
"""Check that system identification works as expected on Linux."""
platform = Platform(self.sys)
self.assertTrue(platform.is_linux)
self.assertFalse(platform.is_osx)
self.assertFalse(platform.is_windows)
def test_supports_color(self):
"""Check that color support works as expected on Linux."""
platform = Platform(self.sys)
self.assertTrue(platform.supports_color)
class TestPythonVersion(unittest.TestCase):
def setUp(self):
self.sys = MagicMock()
self.sys.configure_mock(platform="darwin")
def test_is_python(self):
"""Check that Python version identification works as expected."""
platform = Platform(self.sys)
# noinspection PyUnresolvedReferences
with patch.object(self.sys, "version_info") as version_info:
version_info.major = 2
self.assertTrue(platform.is_python2)
# noinspection PyUnresolvedReferences
with patch.object(self.sys, "version_info") as version_info:
version_info.major = 3
self.assertTrue(platform.is_python3)
class TestUnrecognizedPlatform(unittest.TestCase):
def setUp(self):
self.sys = MagicMock()
self.sys.configure_mock(platform="whatzit")
class FakeStdOut(object):
# noinspection PyMethodMayBeStatic
def isatty(self):
return False
self.sys.stdout = FakeStdOut()
def test_get_configuration_path(self):
"""Check the path for persistent application data for an unrecognized operating system."""
platform = Platform(self.sys)
self.assertRaises(NotImplementedError, platform.get_configuration_path)
def test_get_temp_path(self):
"""Check the path for temporary application data for an unrecognized operating system."""
platform = Platform(self.sys)
self.assertRaises(NotImplementedError, platform.get_temp_path)
def test_is(self):
"""Check that system identification works as expected for an unrecognized operating system."""
platform = Platform(self.sys)
self.assertFalse(platform.is_linux)
self.assertFalse(platform.is_osx)
self.assertFalse(platform.is_windows)
def test_supports_color(self):
"""Check that color support does not work for ANSICON."""
platform = Platform(self.sys)
with modified_environ(ANSICON="present"):
self.assertFalse(platform.supports_color)
self.assertFalse(platform.supports_color)
class TestWindowsPlatform(unittest.TestCase):
def setUp(self):
self.sys = MagicMock()
self.sys.configure_mock(platform="win32")
def test_get_configuration_path(self):
"""Check the path for persistent application data on Windows."""
platform = Platform(self.sys)
self.assertTrue("Roaming" in platform.get_configuration_path())
def test_get_temp_path(self):
"""Check the path for temporary application data on Windows."""
platform = Platform(self.sys)
self.assertTrue("Locals" in platform.get_temp_path())
def test_is(self):
"""Check that system identification works as expected on Windows."""
platform = Platform(self.sys)
self.assertFalse(platform.is_linux)
self.assertFalse(platform.is_osx)
self.assertTrue(platform.is_windows)
def test_supports_color(self):
"""Check that color support does not work for Windows."""
platform = Platform(self.sys)
self.assertFalse(platform.supports_color)
| 30.531579 | 102 | 0.67971 |
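The modified_environ() helper defined at the top of tests/test_platform.py above is a general pattern for temporarily patching os.environ; here is a standalone sketch of the same idea (the variable name ANSICON is taken from the test that uses it).

import contextlib
import os

@contextlib.contextmanager
def modified_environ(**environ):
    # Save, patch, and always restore the process environment.
    old_environ = dict(os.environ)
    os.environ.update(environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(old_environ)

with modified_environ(ANSICON="present"):
    print("inside:", os.environ.get("ANSICON"))   # "present"
print("outside:", os.environ.get("ANSICON"))      # restored (usually None)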
52dbdab4e8e2f737bbb50f29cfeff9b3c61f24aa | 8,954 | py | Python | fishervector/FisherVector.py | ZQSIAT/fishervector | d2f2cc3e5132a1834d4e4cadd5d341fb2d16add5 | ["MIT"] | 1 | 2018-08-16T01:42:43.000Z | 2018-08-16T01:42:43.000Z | fishervector/FisherVector.py | ZQSIAT/fishervector | d2f2cc3e5132a1834d4e4cadd5d341fb2d16add5 | ["MIT"] | null | null | null | fishervector/FisherVector.py | ZQSIAT/fishervector | d2f2cc3e5132a1834d4e4cadd5d341fb2d16add5 | ["MIT"] | null | null | null |
import numpy as np
from sklearn.mixture import GaussianMixture
import pickle, os
N_Kernel_Choices = [5, 20, 60, 100, 200, 500]
class FisherVectorGMM:
def __init__(self, n_kernels=1, covariance_type='diag'):
assert covariance_type in ['diag', 'full']
assert n_kernels > 0
self.n_kernels = n_kernels
self.covariance_type = covariance_type
self.fitted = False
def score(self, X):
return self.gmm.bic(X.reshape(-1, X.shape[-1]))
def fit(self, X, model_dump_path=None, verbose=True):
"""
:param X: either a ndarray with 4 dimensions (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
or with 3 dimensions (n_images, n_descriptors_per_image, n_dim_descriptor)
:param model_dump_path: (optional) path where the fitted model shall be dumped
:param verbose - boolean that controls the verbosity
:return: fitted Fisher vector object
"""
if X.ndim == 4:
self.ndim = 4
return self._fit(X, model_dump_path=model_dump_path, verbose=verbose)
elif X.ndim == 3:
self.ndim = 3
X = np.reshape(X, [1] + list(X.shape))
return self._fit(X, model_dump_path=model_dump_path, verbose=verbose)
else:
raise AssertionError("X must be an ndarray with 3 or 4 dimensions")
def fit_by_bic(self, X, choices_n_kernels=N_Kernel_Choices, model_dump_path=None, verbose=True):
"""
Fits the GMM with various n_kernels and selects the model with the lowest BIC
:param X: either a ndarray with 4 dimensions (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
or with 3 dimensions (n_images, n_descriptors_per_image, n_dim_descriptor)
:param choices_n_kernels: array of positive integers that specify with how many kernels the GMM shall be trained
default: [20, 60, 100, 200, 500]
:param model_dump_path: (optional) path where the fitted model shall be dumped
:param verbose - boolean that controls the verbosity
:return: fitted Fisher vector object
"""
if X.ndim == 4:
self.ndim = 4
return self._fit_by_bic(X, choices_n_kernels=choices_n_kernels, model_dump_path=model_dump_path, verbose=verbose)
elif X.ndim == 3:
self.ndim = 3
X = np.reshape(X, [1] + list(X.shape))
return self._fit_by_bic(X, choices_n_kernels=choices_n_kernels, model_dump_path=model_dump_path, verbose=verbose)
else:
raise AssertionError("X must be an ndarray with 3 or 4 dimensions")
def predict(self, X, normalized=True):
"""
Computes Fisher Vectors of provided X
:param X: either a ndarray with 4 dimensions (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
or with 3 dimensions (n_images, n_descriptors_per_image, n_dim_descriptor)
:param normalized: boolean that indicated whether the fisher vectors shall be normalized --> improved fisher vector
(https://www.robots.ox.ac.uk/~vgg/rg/papers/peronnin_etal_ECCV10.pdf)
:returns fv: fisher vectors
if X.ndim is 4 then returns ndarray of shape (n_videos, n_frames, 2*n_kernels, n_feature_dim)
if X.ndim is 3 then returns ndarray of shape (n_images, 2*n_kernels, n_feature_dim)
"""
if X.ndim == 4:
return self._predict(X, normalized=normalized)
elif X.ndim == 3:
orig_shape = X.shape
X = np.reshape(X, [1] + list(X.shape))
result = self._predict(X, normalized=normalized)
return np.reshape(result, (orig_shape[0], 2 * self.n_kernels, orig_shape[-1]))
else:
raise AssertionError("X must be an ndarray with 3 or 4 dimensions")
def _fit(self, X, model_dump_path=None, verbose=True):
"""
:param X: shape (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
:param model_dump_path: (optional) path where the fitted model shall be dumped
:param verbose - boolean that controls the verbosity
:return: fitted Fisher vector object
"""
assert X.ndim == 4
self.feature_dim = X.shape[-1]
X = X.reshape(-1, X.shape[-1])
# fit GMM and store params of fitted model
self.gmm = gmm = GaussianMixture(n_components=self.n_kernels, covariance_type=self.covariance_type, max_iter=1000).fit(X)
self.covars = gmm.covariances_
self.means = gmm.means_
self.weights = gmm.weights_
# if cov_type is diagonal - make sure that covars holds a diagonal matrix
if self.covariance_type == 'diag':
cov_matrices = np.empty(shape=(self.n_kernels, self.covars.shape[1], self.covars.shape[1]))
for i in range(self.n_kernels):
cov_matrices[i, :, :] = np.diag(self.covars[i, :])
self.covars = cov_matrices
assert self.covars.ndim == 3
self.fitted = True
if verbose:
print('fitted GMM with %i kernels'%self.n_kernels)
if model_dump_path:
with open(model_dump_path, 'wb') as f:
pickle.dump(self,f, protocol=4)
if verbose:
print('Dumped fitted model to', model_dump_path)
return self
def _fit_by_bic(self, X, choices_n_kernels=N_Kernel_Choices, model_dump_path=None, verbose=True):
"""
Fits the GMM with various n_kernels and selects the model with the lowest BIC
:param X: shape (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
:param choices_n_kernels: array of positive integers that specify with how many kernels the GMM shall be trained
           default: [5, 20, 60, 100, 200, 500]
:param model_dump_path: (optional) path where the fitted model shall be dumped
    :param verbose: boolean that controls the verbosity
:return: fitted Fisher vector object
"""
bic_scores = []
for n_kernels in choices_n_kernels:
self.n_kernels = n_kernels
bic_score = self.fit(X, verbose=False).score(X)
bic_scores.append(bic_score)
if verbose:
print('fitted GMM with %i kernels - BIC = %.4f'%(n_kernels, bic_score))
best_n_kernels = choices_n_kernels[np.argmin(bic_scores)]
self.n_kernels = best_n_kernels
if verbose:
print('Selected GMM with %i kernels' % best_n_kernels)
return self.fit(X, model_dump_path=model_dump_path, verbose=True)
def _predict(self, X, normalized=True):
"""
Computes Fisher Vectors of provided X
:param X: features - ndarray of shape (n_videos, n_frames, n_features, n_feature_dim)
    :param normalized: boolean that indicates whether the fisher vectors shall be normalized --> improved fisher vector
:returns fv: fisher vectors - ndarray of shape (n_videos, n_frames, 2*n_kernels, n_feature_dim)
"""
assert self.fitted, "Model (GMM) must be fitted"
assert self.feature_dim == X.shape[-1], "Features must have same dimensionality as fitted GMM"
assert X.ndim == 4
n_videos, n_frames = X.shape[0], X.shape[1]
X = X.reshape((-1, X.shape[-2], X.shape[-1])) #(n_images, n_features, n_feature_dim)
X_matrix = X.reshape(-1, X.shape[-1])
# set equal weights to predict likelihood ratio
self.gmm.weights_ = np.ones(self.n_kernels) / self.n_kernels
likelihood_ratio = self.gmm.predict_proba(X_matrix).reshape(X.shape[0], X.shape[1], self.n_kernels)
var = np.diagonal(self.covars, axis1=1, axis2=2)
    norm_dev_from_modes = ((X[:,:, None, :] - self.means[None, None, :, :]) / var[None, None, :, :])  # (n_images, n_features, n_kernels, n_feature_dim)
# mean deviation
mean_dev = np.multiply(likelihood_ratio[:,:,:, None], norm_dev_from_modes).mean(axis=1) #(n_images, n_kernels, n_feature_dim)
mean_dev = np.multiply(1 / np.sqrt(self.weights[None, :, None]), mean_dev) #(n_images, n_kernels, n_feature_dim)
# covariance deviation
cov_dev = np.multiply(likelihood_ratio[:,:,:, None], norm_dev_from_modes**2 - 1).mean(axis=1)
cov_dev = np.multiply(1 / np.sqrt(2 * self.weights[None, :, None]), cov_dev)
fisher_vectors = np.concatenate([mean_dev, cov_dev], axis=1)
# final reshape - separate frames and videos
assert fisher_vectors.ndim == 3
fisher_vectors = fisher_vectors.reshape((n_videos, n_frames, fisher_vectors.shape[1], fisher_vectors.shape[2]))
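    # The "improved Fisher vector" normalization below (see the paper linked in
    # predict()) applies signed square-rooting (power normalization), then a global
    # L2 normalization per frame, and finally a small-value threshold that
    # sparsifies the vectors.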
if normalized:
fisher_vectors = np.sqrt(np.abs(fisher_vectors)) * np.sign(fisher_vectors) # power normalization
fisher_vectors = fisher_vectors / np.linalg.norm(fisher_vectors, axis=(2,3))[:,:,None,None]
fisher_vectors[fisher_vectors < 10**-4] = 0
assert fisher_vectors.ndim == 4
return fisher_vectors
@staticmethod
def load_from_pickle(pickle_path):
"""
loads a previously dumped FisherVectorGMM instance
:param pickle_path: path to the pickle file
:return: loaded FisherVectorGMM object
"""
assert os.path.isfile(pickle_path), 'pickle path must be an existing file'
with open(pickle_path, 'rb') as f:
fv_gmm = pickle.load(f)
assert isinstance(fv_gmm, FisherVectorGMM), 'pickled object must be an instance of FisherVectorGMM'
return fv_gmm
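# Minimal usage sketch: fit the GMM on randomly generated descriptors and compute
# Fisher vectors. The shapes and the kernel count below are arbitrary illustration
# values; real image descriptors (e.g. SIFT features) would take their place.
if __name__ == '__main__':
  np.random.seed(0)
  descriptors = np.random.randn(10, 50, 64)  # 10 images, 50 descriptors each, 64-dim
  fv_gmm = FisherVectorGMM(n_kernels=2).fit(descriptors, verbose=True)
  fisher_vectors = fv_gmm.predict(descriptors)
  print(fisher_vectors.shape)  # expected (n_images, 2 * n_kernels, 64) == (10, 4, 64)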
| 42.235849
| 149
| 0.694215
|
292a6ba642471bbcd869c0ac81c09d29bd13ec8d
| 5,955
|
py
|
Python
|
BSSN/ADMBSSN_tofrom_4metric.py
|
Steve-Hawk/nrpytutorial
|
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
|
[
"BSD-2-Clause"
] | 1
|
2019-12-23T05:31:25.000Z
|
2019-12-23T05:31:25.000Z
|
BSSN/ADMBSSN_tofrom_4metric.py
|
Steve-Hawk/nrpytutorial
|
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
|
[
"BSD-2-Clause"
] | null | null | null |
BSSN/ADMBSSN_tofrom_4metric.py
|
Steve-Hawk/nrpytutorial
|
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
|
[
"BSD-2-Clause"
] | 1
|
2021-03-02T12:51:56.000Z
|
2021-03-02T12:51:56.000Z
|
# As documented in the NRPy+ tutorial module
# Tutorial-ADMBSSN_tofrom_4metric.ipynb,
# this module will construct expressions for
# ADM or BSSN quantities in terms of the
# 4-metric g4DD, and g4DD/g4UU in terms of
# ADM/BSSN quantities.
# Author: Zachariah B. Etienne
# zachetie **at** gmail **dot** com
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import sys # Standard Python modules for multiplatform OS-level functions
def setup_ADM_quantities(inputvars):
if inputvars == "ADM":
gammaDD = ixp.declarerank2("gammaDD", "sym01")
betaU = ixp.declarerank1("betaU")
alpha = sp.symbols("alpha", real=True)
elif inputvars == "BSSN":
import BSSN.ADM_in_terms_of_BSSN as AitoB
# Construct gamma_{ij} in terms of cf & gammabar_{ij}
AitoB.ADM_in_terms_of_BSSN()
gammaDD = AitoB.gammaDD
# Next construct beta^i in terms of vet^i and reference metric quantities
import BSSN.BSSN_quantities as Bq
Bq.BSSN_basic_tensors()
betaU = Bq.betaU
alpha = sp.symbols("alpha", real=True)
else:
print("inputvars = " + str(inputvars) + " not supported. Please choose ADM or BSSN.")
sys.exit(1)
return gammaDD,betaU,alpha
# g_{mu nu} in terms of BSSN (if inputvars=="BSSN") or ADM (if inputvars=="ADM") variables.
def g4DD_ito_BSSN_or_ADM(inputvars,gammaDD=None,betaU=None,alpha=None):
# Step 0: Declare g4DD as globals, to make interfacing with other modules/functions easier
global g4DD
# Step 1: Set gammaDD, betaU, and alpha if not already input.
    if gammaDD is None and betaU is None and alpha is None:
gammaDD,betaU,alpha = setup_ADM_quantities(inputvars)
# Step 2: Compute g4DD = g_{mu nu}:
# To get \gamma_{\mu \nu} = gamma4DD[mu][nu], we'll need to construct the 4-metric, using Eq. 2.122 in B&S:
g4DD = ixp.zerorank2(DIM=4)
# Step 2.a: Compute beta_i via Eq. 2.121 in B&S
betaD = ixp.zerorank1()
for i in range(3):
for j in range(3):
betaD[i] += gammaDD[i][j] * betaU[j]
# Step 2.b: Compute beta_i beta^i, the beta contraction.
beta2 = sp.sympify(0)
for i in range(3):
beta2 += betaU[i] * betaD[i]
# Step 2.c: Construct g4DD via Eq. 2.122 in B&S
g4DD[0][0] = -alpha ** 2 + beta2
for mu in range(1, 4):
g4DD[mu][0] = g4DD[0][mu] = betaD[mu - 1]
for mu in range(1, 4):
for nu in range(1, 4):
g4DD[mu][nu] = gammaDD[mu - 1][nu - 1]
# g^{mu nu} in terms of BSSN (if inputvars=="BSSN") or ADM (if inputvars=="ADM") variables.
def g4UU_ito_BSSN_or_ADM(inputvars,gammaDD=None,betaU=None,alpha=None, gammaUU=None):
# Step 0: Declare g4UU as globals, to make interfacing with other modules/functions easier
global g4UU
# Step 1: Set gammaDD, betaU, and alpha if not already input.
    if gammaDD is None and betaU is None and alpha is None:
gammaDD,betaU,alpha = setup_ADM_quantities(inputvars)
    # Step 2: Compute g4UU = g^{mu nu}:
# To get \gamma^{\mu \nu} = gamma4UU[mu][nu], we'll need to use Eq. 2.119 in B&S.
g4UU = ixp.zerorank2(DIM=4)
# Step 3: Construct g4UU = g^{mu nu}
# Step 3.a: Compute gammaUU based on provided gammaDD:
    if gammaUU is None:
gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
# Then evaluate g4UU:
g4UU = ixp.zerorank2(DIM=4)
g4UU[0][0] = -1 / alpha ** 2
for mu in range(1, 4):
g4UU[0][mu] = g4UU[mu][0] = betaU[mu - 1] / alpha ** 2
for mu in range(1, 4):
for nu in range(1, 4):
g4UU[mu][nu] = gammaUU[mu - 1][nu - 1] - betaU[mu - 1] * betaU[nu - 1] / alpha ** 2
# BSSN (if inputvars=="BSSN") or ADM (if inputvars=="ADM") metric variables in terms of g_{mu nu}
def BSSN_or_ADM_ito_g4DD(inputvars,g4DD=None):
# Step 0: Declare output variables as globals, to make interfacing with other modules/functions easier
if inputvars == "ADM":
global gammaDD, betaU, alpha
elif inputvars == "BSSN":
global hDD, cf, vetU, alpha
else:
print("inputvars = " + str(inputvars) + " not supported. Please choose ADM or BSSN.")
sys.exit(1)
# Step 1: declare g4DD as symmetric rank-4 tensor:
g4DD_is_input_into_this_function = True
    if g4DD is None:
g4DD = ixp.declarerank2("g4DD", "sym01", DIM=4)
g4DD_is_input_into_this_function = False
# Step 2: Compute gammaDD & betaD
betaD = ixp.zerorank1()
gammaDD = ixp.zerorank2()
for i in range(3):
betaD[i] = g4DD[0][i]
for j in range(3):
gammaDD[i][j] = g4DD[i + 1][j + 1]
# Step 3: Compute betaU
# Step 3.a: Compute gammaUU based on provided gammaDD
gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
# Step 3.b: Use gammaUU to raise betaU
betaU = ixp.zerorank1()
for i in range(3):
for j in range(3):
betaU[i] += gammaUU[i][j] * betaD[j]
# Step 4: Compute alpha = sqrt(beta^2 - g_{00}):
# Step 4.a: Compute beta^2 = beta^k beta_k:
beta_squared = sp.sympify(0)
for k in range(3):
beta_squared += betaU[k] * betaD[k]
# Step 4.b: alpha = sqrt(beta^2 - g_{00}):
    if not g4DD_is_input_into_this_function:
alpha = sp.sqrt(sp.simplify(beta_squared) - g4DD[0][0])
else:
alpha = sp.sqrt(beta_squared - g4DD[0][0])
# Step 5: If inputvars == "ADM", we are finished. Return.
if inputvars == "ADM":
return
# Step 6: If inputvars == "BSSN", convert ADM to BSSN
import BSSN.BSSN_in_terms_of_ADM as BitoA
dummyBU = ixp.zerorank1()
BitoA.gammabarDD_hDD(gammaDD)
BitoA.cf_from_gammaDD(gammaDD)
BitoA.betU_vetU(betaU, dummyBU)
hDD = BitoA.hDD
cf = BitoA.cf
vetU = BitoA.vetU
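# Minimal usage sketch, assuming the NRPy+ modules imported above (indexedexp and
# the BSSN.* helpers) are importable: build the symbolic ADM 4-metric and its
# inverse, then print their time-time components, g_{00} = -alpha^2 + beta_k beta^k
# and g^{00} = -1/alpha^2.
if __name__ == "__main__":
    g4DD_ito_BSSN_or_ADM("ADM")
    print("g4DD[0][0] =", g4DD[0][0])
    g4UU_ito_BSSN_or_ADM("ADM")
    print("g4UU[0][0] =", g4UU[0][0])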
| 38.173077
| 111
| 0.629219
|
c5b9459f9c625b2f5d0971f490415ea380965cfc
| 12,321
|
py
|
Python
|
virtual/lib/python3.6/site-packages/pilkit/utils.py
|
kenmutuma001/galleria
|
1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72
|
[
"Unlicense"
] | null | null | null |
virtual/lib/python3.6/site-packages/pilkit/utils.py
|
kenmutuma001/galleria
|
1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72
|
[
"Unlicense"
] | 12
|
2020-02-12T00:17:20.000Z
|
2022-02-10T08:34:42.000Z
|
virtual/lib/python3.6/site-packages/pilkit/utils.py
|
kenmutuma001/galleria
|
1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72
|
[
"Unlicense"
] | null | null | null |
import os
import mimetypes
import sys
from io import UnsupportedOperation
from .exceptions import UnknownExtension, UnknownFormat
from .lib import Image, ImageFile, StringIO, string_types
RGBA_TRANSPARENCY_FORMATS = ['PNG']
PALETTE_TRANSPARENCY_FORMATS = ['PNG', 'GIF']
DEFAULT_EXTENSIONS = {
'JPEG': '.jpg',
}
def img_to_fobj(img, format, autoconvert=True, **options):
return save_image(img, StringIO(), format, options, autoconvert)
def open_image(target):
target.seek(0)
return Image.open(target)
_pil_init = 0
def _preinit_pil():
"""Loads the standard PIL file format drivers. Returns True if ``preinit()``
was called (and there's a potential that more drivers were loaded) or False
if there is no possibility that new drivers were loaded.
"""
global _pil_init
if _pil_init < 1:
Image.preinit()
_pil_init = 1
return True
return False
def _init_pil():
"""Loads all PIL file format drivers. Returns True if ``init()`` was called
(and there's a potential that more drivers were loaded) or False if there is
no possibility that new drivers were loaded.
"""
global _pil_init
_preinit_pil()
if _pil_init < 2:
Image.init()
_pil_init = 2
return True
return False
def _extension_to_format(extension):
return Image.EXTENSION.get(extension.lower())
def _format_to_extension(format):
if format:
format = format.upper()
if format in DEFAULT_EXTENSIONS:
ext = DEFAULT_EXTENSIONS[format]
# It's not enough for an extension to be listed in
# ``DEFAULT_EXTENSIONS``, it must also be recognized by PIL.
if ext in Image.EXTENSION:
return ext
for k, v in Image.EXTENSION.items():
if v == format:
return k
return None
def extension_to_mimetype(ext):
try:
filename = 'a%s' % (ext or '') # guess_type requires a full filename, not just an extension
mimetype = mimetypes.guess_type(filename)[0]
except IndexError:
mimetype = None
return mimetype
def format_to_mimetype(format):
return extension_to_mimetype(format_to_extension(format))
def extension_to_format(extension):
"""Returns the format that corresponds to the provided extension.
"""
format = _extension_to_format(extension)
if not format and _preinit_pil():
format = _extension_to_format(extension)
if not format and _init_pil():
format = _extension_to_format(extension)
if not format:
raise UnknownExtension(extension)
return format
def format_to_extension(format):
"""Returns the first extension that matches the provided format.
"""
extension = None
if format:
extension = _format_to_extension(format)
if not extension and _preinit_pil():
extension = _format_to_extension(format)
if not extension and _init_pil():
extension = _format_to_extension(format)
if not extension:
raise UnknownFormat(format)
return extension
def suggest_extension(name, format):
original_extension = os.path.splitext(name)[1]
try:
suggested_extension = format_to_extension(format)
except UnknownFormat:
extension = original_extension
else:
if suggested_extension.lower() == original_extension.lower():
extension = original_extension
else:
try:
original_format = extension_to_format(original_extension)
except UnknownExtension:
extension = suggested_extension
else:
# If the formats match, give precedence to the original extension.
if format.lower() == original_format.lower():
extension = original_extension
else:
extension = suggested_extension
return extension
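# For example, assuming PIL's standard drivers are registered: suggesting an
# extension for "photo.png" with format "JPEG" yields ".jpg" (the formats differ,
# so the target format's extension wins), while "photo.jpeg" with "JPEG" keeps
# ".jpeg" (the formats match, so the original extension takes precedence).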
class FileWrapper(object):
def __init__(self, wrapped):
super(FileWrapper, self).__setattr__('_wrapped', wrapped)
def fileno(self):
try:
return self._wrapped.fileno()
except UnsupportedOperation:
raise AttributeError
def __getattr__(self, name):
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
return setattr(self._wrapped, name, value)
def __delattr__(self, key):
return delattr(self._wrapped, key)
def save_image(img, outfile, format, options=None, autoconvert=True):
"""
Wraps PIL's ``Image.save()`` method. There are two main benefits of using
this function over PIL's:
1. It gracefully handles the infamous "Suspension not allowed here" errors.
2. It prepares the image for saving using ``prepare_image()``, which will do
some common-sense processing given the target format.
"""
options = options or {}
if autoconvert:
img, save_kwargs = prepare_image(img, format)
        # Use the save kwargs returned by prepare_image() as the base and update
        # them with the provided options, then use the result.
save_kwargs.update(options)
options = save_kwargs
# Attempt to reset the file pointer.
try:
outfile.seek(0)
except AttributeError:
pass
def save(fp):
with quiet():
img.save(fp, format, **options)
# Some versions of PIL only catch AttributeErrors where they should also
# catch UnsupportedOperation exceptions. To work around this, we wrap the
# file with an object that will raise the type of error it wants.
if any(isinstance(outfile, t) for t in string_types):
# ...but don't wrap strings.
wrapper = outfile
else:
wrapper = FileWrapper(outfile)
try:
save(wrapper)
except IOError:
        # PIL can have problems saving large JPEGs if MAXBLOCK isn't big enough,
        # so if we have a problem saving, we temporarily increase it. See
# http://github.com/matthewwithanm/django-imagekit/issues/50
# https://github.com/matthewwithanm/django-imagekit/issues/134
# https://github.com/python-imaging/Pillow/issues/148
# https://github.com/matthewwithanm/pilkit/commit/0f914e8b40e3d30f28e04ffb759b262aa8a1a082#commitcomment-3885362
# MAXBLOCK must be at least as big as...
new_maxblock = max(
(len(options['exif']) if 'exif' in options else 0) + 5, # ...the entire exif header block
img.size[0] * 4, # ...a complete scan line
3 * img.size[0] * img.size[1], # ...3 bytes per every pixel in the image
)
if new_maxblock < ImageFile.MAXBLOCK:
raise
old_maxblock = ImageFile.MAXBLOCK
ImageFile.MAXBLOCK = new_maxblock
try:
save(wrapper)
finally:
ImageFile.MAXBLOCK = old_maxblock
try:
outfile.seek(0)
except AttributeError:
pass
return outfile
class quiet(object):
"""
A context manager for suppressing the stderr activity of PIL's C libraries.
Based on http://stackoverflow.com/a/978264/155370
"""
def __enter__(self):
try:
self.stderr_fd = sys.__stderr__.fileno()
except AttributeError:
# In case of Azure, the file descriptor is not present so we can return
# from here
return
try:
self.null_fd = os.open(os.devnull, os.O_RDWR)
except OSError:
            # If /dev/null isn't writable, then they just have to put up with
# the noise.
return
self.old = os.dup(self.stderr_fd)
os.dup2(self.null_fd, self.stderr_fd)
def __exit__(self, *args, **kwargs):
if not getattr(self, 'null_fd', None):
return
if not getattr(self, 'old', None):
return
os.dup2(self.old, self.stderr_fd)
os.close(self.null_fd)
os.close(self.old)
def prepare_image(img, format):
"""
Prepares the image for saving to the provided format by doing some
common-sense conversions. This includes things like preserving transparency
and quantizing. This function is used automatically by ``save_image()``
immediately before saving unless you specify ``autoconvert=False``. It is
provided as a utility for those doing their own processing.
:param img: The image to prepare for saving.
:param format: The format that the image will be saved to.
"""
make_opaque = False
save_kwargs = {}
format = format.upper()
if img.mode == 'RGBA':
if format in RGBA_TRANSPARENCY_FORMATS:
pass
elif format in PALETTE_TRANSPARENCY_FORMATS:
# If you're going from a format with alpha transparency to one
# with palette transparency, transparency values will be
# snapped: pixels that are more opaque than not will become
# fully opaque; pixels that are more transparent than not will
# become fully transparent. This will not produce a good-looking
# result if your image contains varying levels of opacity; in
# that case, you'll probably want to use a processor to composite
# the image on a solid color. The reason we don't do this by
# default is because not doing so allows processors to treat
# RGBA-format images as a super-type of P-format images: if you
# have an RGBA-format image with only a single transparent
# color, and save it as a GIF, it will retain its transparency.
# In other words, a P-format image converted to an
# RGBA-formatted image by a processor and then saved as a
# P-format image will give the expected results.
# Work around a bug in PIL: split() doesn't check to see if
# img is loaded.
img.load()
alpha = img.split()[-1]
mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE,
colors=255)
img.paste(255, mask)
save_kwargs['transparency'] = 255
else:
# Simply converting an RGBA-format image to an RGB one creates a
# gross result, so we paste the image onto a white background. If
# that's not what you want, that's fine: use a processor to deal
# with the transparency however you want. This is simply a
# sensible default that will always produce something that looks
# good. Or at least, it will look better than just a straight
# conversion.
make_opaque = True
elif img.mode == 'P':
if format in PALETTE_TRANSPARENCY_FORMATS:
try:
save_kwargs['transparency'] = img.info['transparency']
except KeyError:
pass
elif format in RGBA_TRANSPARENCY_FORMATS:
# Currently PIL doesn't support any RGBA-mode formats that
# aren't also P-mode formats, so this will never happen.
img = img.convert('RGBA')
else:
make_opaque = True
else:
img = img.convert('RGB')
# GIFs are always going to be in palette mode, so we can do a little
# optimization. Note that the RGBA sources also use adaptive
# quantization (above). Images that are already in P mode don't need
# any quantization because their colors are already limited.
if format == 'GIF':
img = img.convert('P', palette=Image.ADAPTIVE)
if make_opaque:
from .processors import MakeOpaque
img = MakeOpaque().process(img).convert('RGB')
if format == 'JPEG':
save_kwargs['optimize'] = True
return img, save_kwargs
def process_image(img, processors=None, format=None, autoconvert=True, options=None):
from .processors import ProcessorPipeline
original_format = img.format
# Run the processors
img = ProcessorPipeline(processors or []).process(img)
format = format or img.format or original_format or 'JPEG'
options = options or {}
return img_to_fobj(img, format, autoconvert, **options)
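# Minimal usage sketch, assuming Pillow is installed and pilkit's processors are
# importable: save a semi-transparent RGBA test image as JPEG. prepare_image()
# composites it onto an opaque background before the save. The image contents
# below are arbitrary.
if __name__ == '__main__':
    from io import BytesIO
    demo_img = Image.new('RGBA', (32, 32), (255, 0, 0, 128))
    saved = save_image(demo_img, BytesIO(), 'JPEG')
    print('wrote %d bytes' % len(saved.getvalue()))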
| 33.756164
| 120
| 0.637205
|
84df103bf805793a511e5ea2a91ffcae8300884d
| 13,081
|
py
|
Python
|
kimchi/kimchicli.py
|
garym/kimchi
|
784b4585ed2c4780c657c4510066ad29f0f1496a
|
[
"Apache-2.0"
] | 2
|
2015-09-19T13:50:06.000Z
|
2015-09-19T14:01:19.000Z
|
kimchi/kimchicli.py
|
garym/kimchi
|
784b4585ed2c4780c657c4510066ad29f0f1496a
|
[
"Apache-2.0"
] | null | null | null |
kimchi/kimchicli.py
|
garym/kimchi
|
784b4585ed2c4780c657c4510066ad29f0f1496a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2015 Gary Martin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cmd
import logging
import random
import string
import sys
from functools import partial
import Stemmer
from hashlib import sha1
from kimchi.genericapi import GenericAPI as Connector
from kimchi.arangodbapi import (Arango, ArangoError, Document, Edge,
SimpleQuery, Traversal)
DEF_CHAIN_ORDER = 2
MAX_REPLIES = 30
MAX_REPLY_LENGTH = 20
PUNCTUATION_MAP = str.maketrans({p: '' for p in string.punctuation})
class Brain(object):
def __init__(self, dbname="chains", chainorder=DEF_CHAIN_ORDER,
stemmer='english'):
conn = Connector('http://127.0.0.1:8529')
sysdb = Arango(conn._db._system._api.database)
try:
sysdb.create({'name': dbname})
except ArangoError as e:
if e.code != 409:
raise
db = conn._db[dbname]
self.docs = Document(db)
self.edges = Edge(db)
self.simple_query = SimpleQuery(db)
self.traversal = Traversal(db)
self.collection_name = "chains"
self.edge_collection_name = "links"
self.control_collection_name = "control"
self.chainorder = self.get_or_set_brain_info('chainorder', chainorder)
self.stop = self.get_or_set_brain_info('stop', '////////')
stemmer_type = self.get_or_set_brain_info('stemmer', stemmer)
self.stemmer = Stemmer.Stemmer(stemmer_type)
def stemWord(self, word):
return self.stemmer.stemWord(word.lower().translate(PUNCTUATION_MAP))
def get_or_set_brain_info(self, key, value):
collection = self.control_collection_name
try:
docs = self.simple_query.by_example(
collection, {'_key': key})
except KeyError:
docs = []
if docs:
return docs[0]['value']
doc = self.docs.create(
{'_key': key, 'value': value},
params={'collection': collection, 'createCollection': True})
return value
def add_nodes(self, nodegenerator):
stemWord = self.stemWord
handles = []
collection = self.collection_name
nodes = [n for n in nodegenerator]
full_length = len(nodes)
for i, node in enumerate(nodes):
data = {
'_key': sha1(str(node).encode('utf8')).hexdigest(),
'base_word_stem': stemWord(node[0]),
'node': node,
}
full_data = {
'outbound_distance': full_length - i, # distance to end
'inbound_distance': i + 1, # distance from start
}
full_data.update(data)
try:
docres = self.docs.create(full_data, params={
'collection': collection, 'createCollection': True})
except ArangoError:
docres = self.simple_query.by_example(collection, data)[0]
updated = False
for key in ('outbound_distance', 'inbound_distance'):
if key not in docres:
updated = True
elif docres[key] < full_data[key]:
full_data[key] = docres[key]
elif docres[key] > full_data[key]:
updated = True
if updated:
self.docs.update(docres['_id'], full_data, params={
'collection': collection})
handles.append(docres['_id'])
return handles
def add_edges(self, handles):
current_handle, *rest = handles
collection = self.edge_collection_name
for next_handle in rest:
data = {
'_key': sha1(
str((current_handle, next_handle)).encode('utf8')
).hexdigest(),
}
try:
self.edges.create(data, params={
'collection': collection, 'createCollection': True,
'from': current_handle, 'to': next_handle})
except ArangoError:
pass
current_handle = next_handle
def get_node_by_handle(self, handle):
return self.docs[handle]
def get_node_by_key(self, key):
return self.docs[self.collection_name][key]
def get_nodes_by_first_word(self, word):
return self.simple_query.by_example(
self.collection_name,
{'base_word_stem': self.stemWord(word)},
limit=10
)
def get_edge_by_handle(self, handle):
return self.edges[handle]
def get_edge_by_key(self, key):
return self.docs[self.edge_collection_name][key]
def chunk_msg(self, msg):
words = [self.stop] + msg.split() + [self.stop] * self.chainorder
return (words[i:i + self.chainorder + 1]
for i in range(1 + len(words) - self.chainorder))
def learn(self, msg, reply=False):
if msg.startswith('#'):
return
nodes_to_add = self.chunk_msg(msg)
nodes = self.add_nodes(nodes_to_add)
self.add_edges(nodes)
def get_word_chain(self, doc, direction):
visitor = """
if (! result || ! result.visited) { return; }
if (result.visited.vertices) {
result.visited.vertices.push(vertex.node[0]);
}
if (result.visited.paths) {
var cpath = [];
path.vertices.forEach(function (v) {
cpath.push(v.node[0]);
});
result.visited.paths.push(cpath);
}
"""
filterfn = """
if (path && path.length + vertex.%s > %d) {
return 'exclude';
}
""" % (direction + "_distance", MAX_REPLY_LENGTH)
result = self.traversal.traverse(
doc['_id'],
self.edge_collection_name,
direction=direction,
maxDepth=MAX_REPLY_LENGTH,
filterfn=filterfn,
visitor=visitor)
if 'result' not in result:
return []
paths = result['result']['visited']['paths']
returnpaths = [p[:-1] for p in paths if p[-1] == self.stop]
return returnpaths
def generate_candidate_reply(self, word_list):
sorted_words = sorted(word_list, key=len)[::-1]
replies = []
for word in sorted_words:
logging.debug(word)
docs = self.get_nodes_by_first_word(word)
random.shuffle(docs)
for doc in docs:
forward_words = self.get_word_chain(doc, "outbound")
reverse_words = self.get_word_chain(doc, "inbound")
for forward_chain in forward_words:
for reverse_chain in reverse_words:
reply = reverse_chain[::-1] + forward_chain[1:]
replies.append((self.score(reply, word_list), reply))
if len(replies) > MAX_REPLIES:
break
if len(replies) > MAX_REPLIES:
break
if replies:
return random.choice(sorted(replies)[::-1])[1]
def score(self, words, original):
if not words:
return 0.0
# words used less in the brain should improve score?
# sum(1 / len(self.get_nodes_by_first_word(w)) for w in words)
max_word_length = max(len(w) for w in words)
average_word_length = sum(len(w) for w in words) / len(words)
return (max_word_length * average_word_length *
len(set(words) - set(original)))
def generate_replies(self, msg):
words = msg.split()
#starttime = time.time()
#while time.time() - starttime < 0.25:
cr = self.generate_candidate_reply(words)
if not cr:
cr = ["I have nothing to say about that"]
return ' '.join(cr)
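# A Brain is normally driven through the command-line interface below, but it can
# also be used directly, assuming an ArangoDB server is reachable at the
# hard-coded http://127.0.0.1:8529, for example:
#
#     brain = Brain(dbname='chains', chainorder=2)
#     brain.learn('the quick brown fox jumps over the lazy dog')
#     print(brain.generate_replies('tell me about the fox'))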
def run():
parser = argparse.ArgumentParser(description="A chat bot")
# database options
db_parser = argparse.ArgumentParser(add_help=False)
db_parser.add_argument(
'--dbname', default='chains',
help="Specifies the brain database.")
# simulation options
note = ("Note that this option is overridden by database settings and "
"so is only used at database initialisation time.")
modelling_parser = argparse.ArgumentParser(add_help=False)
modelling_parser.add_argument(
'--chain-order', type=int, default=DEF_CHAIN_ORDER,
help="Set the simulation chain size parameter. " + note)
modelling_parser.add_argument(
'--language', choices=Stemmer.algorithms(), default='english',
help="Set the simulation language for the stemmer. " + note)
# learning options
learning_parser = argparse.ArgumentParser(add_help=False)
learning_parser.add_argument(
'infile', metavar='INFILE', nargs='?', type=argparse.FileType('r'),
default=sys.stdin,
help="An input file from which to learn")
# reply options
reply_parser = argparse.ArgumentParser(add_help=False)
reply_parser.add_argument(
'message', metavar='MSG', nargs='+', action='append',
help="Specify a message to respond to.")
subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
subparsers.required = True
### learn command ###
learn_subparser = subparsers.add_parser(
'learn', help="add source data to the corpus",
parents=[learning_parser, db_parser, modelling_parser])
learn_subparser.set_defaults(func=do_learn)
### response command
reply_subparser = subparsers.add_parser(
'reply', help="send a message to get a reply back",
parents=[reply_parser, db_parser, modelling_parser])
reply_subparser.set_defaults(func=do_response)
### shell command
shell_subparser = subparsers.add_parser(
'shell', help="enter an interactive shell",
parents=[db_parser, modelling_parser])
shell_subparser.set_defaults(func=do_shell)
dargs = vars(parser.parse_args())
for option in ('file', 'message'):
if dargs.get(option):
dargs[option] = [x for xs in dargs[option] for x in xs]
dargs['func'](dargs)
def do_learn(dargs):
# TODO - add sensible behaviour for when no files are specified (stdin?)
brain = get_brain(dargs)
for i, msg in enumerate(dargs['infile']):
if i % 100 == 0:
logging.debug(i)
brain.learn(msg)
def do_response(dargs):
brain = get_brain(dargs)
for msg in dargs['message'] if dargs['message'] else []:
print(brain.generate_replies(msg))
def do_shell(dargs):
BrainShell(dargs).cmdloop()
def get_brain(dargs):
return Brain(dargs['dbname'], dargs['chain_order'],
stemmer=dargs['language'])
class BrainShell(cmd.Cmd):
"""Command processor for Kimchi"""
intro = ("+--------------------------------------+\n"
"| |\n"
"| # # ##### # # #### # # ##### |\n"
"| # # # ## ## # # # # |\n"
"| ### # # # # # ##### # |\n"
"| # # # # # # # # # |\n"
"| # # ##### # # #### # # ##### |\n"
"| |\n"
"+--------------------------------------+\n")
prompt = "kimchi> "
def __init__(self, dargs, *args, **kwargs):
self.brain = get_brain(dargs)
self.last_line = None
super(BrainShell, self).__init__(*args, **kwargs)
def do_EOF(self, line):
return self.do_quit(line)
def do_quit(self, line):
if len(line.split()) > 1:
self.default(line)
else:
print('Bye')
return True
def default(self, line):
self.do_learn(line)
self.last_line = line
def emptyline(self):
if self.last_line is not None:
self.do_reply(self.last_line)
def do_setbrain(self, line):
sl = line.split()
db, c_o = sl[:2] if len(sl) > 1 else (sl[0], DEF_CHAIN_ORDER)
self.brain = get_brain({'dbname': db, 'chain_order': c_o})
def do_learn(self, line):
self.last_line = None
self.brain.learn(line)
def do_reply(self, line):
self.last_line = None
reply = self.brain.generate_replies(line)
print(reply)
if __name__ == '__main__':
run()
| 34.514512
| 78
| 0.569681
|
fea39d0e41b7ff2999fac7a39ef06ebb9c4b8171
| 5,718
|
py
|
Python
|
mango/constants.py
|
nhatminhbeo/mango-explorer
|
270e3c32cf551e343a1bdaf9c93c5220067e1920
|
[
"MIT"
] | 131
|
2021-08-12T00:25:38.000Z
|
2022-03-30T21:22:00.000Z
|
mango/constants.py
|
nhatminhbeo/mango-explorer
|
270e3c32cf551e343a1bdaf9c93c5220067e1920
|
[
"MIT"
] | 42
|
2021-08-10T16:28:20.000Z
|
2022-03-25T01:52:07.000Z
|
mango/constants.py
|
nhatminhbeo/mango-explorer
|
270e3c32cf551e343a1bdaf9c93c5220067e1920
|
[
"MIT"
] | 83
|
2021-08-24T10:27:03.000Z
|
2022-03-29T07:27:44.000Z
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import decimal
import importlib.metadata
import json
import os.path
import typing
from dataclasses import dataclass
from solana.publickey import PublicKey
# # 🥭 Constants
#
# This file contains some hard-coded values, all kept in one place, as well as the mechanism
# for loading the Mango `ids.json` file.
# ## SYSTEM_PROGRAM_ADDRESS
#
# The Solana system program address is always 11111111111111111111111111111111.
#
SYSTEM_PROGRAM_ADDRESS = PublicKey("11111111111111111111111111111111")
# ## SOL_MINT_ADDRESS
#
# The fake mint address of the SOL token. **Note:** Wrapped SOL has a different mint address - it is So11111111111111111111111111111111111111112.
#
SOL_MINT_ADDRESS = PublicKey("So11111111111111111111111111111111111111111")
# ## SOL_DECIMALS
#
# The number of decimal places used to convert Lamports into SOLs.
#
SOL_DECIMALS = decimal.Decimal(9)
# ## SOL_DECIMAL_DIVISOR decimal
#
# The divisor to use to turn an integer value of SOLs from an account's `balance` into a value with the correct number of decimal places.
#
SOL_DECIMAL_DIVISOR = decimal.Decimal(10 ** SOL_DECIMALS)
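# For example, a raw account balance of 1500000000 lamports corresponds to
# decimal.Decimal(1500000000) / SOL_DECIMAL_DIVISOR == decimal.Decimal("1.5") SOL.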
# ## NUM_TOKENS
#
# This is currently hard-coded to 3.
#
NUM_TOKENS = 3
# ## NUM_MARKETS
#
# There is one fewer market than tokens.
#
NUM_MARKETS = NUM_TOKENS - 1
# # WARNING_DISCLAIMER_TEXT
#
# This is the warning text that is output on each run of a command.
#
WARNING_DISCLAIMER_TEXT = """
⚠ WARNING ⚠
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
🥭 Mango Markets: https://mango.markets
📄 Documentation: https://docs.mango.markets/
💬 Discord: https://discord.gg/67jySBhxrg
🐦 Twitter: https://twitter.com/mangomarkets
🚧 Github: https://github.com/blockworks-foundation
📧 Email: mailto:hello@blockworks.foundation
"""
# # _build_data_path
#
# The code needs access to some data files, such as the ids.json file that's used in multiple Mango projects. In
# this project all these data files are kept in the /data directory, relative to the project root.
#
# Some situations can make it tricky accessing files in that known location though. (In particular, Nuitka
# compilation to a standalone executable seems to make accessing internal paths with '..' in them impossible.)
#
# This function provides a consistent way to determine the correct data path for use throughout `mango-explorer`.
#
def _build_data_path() -> str:
possibilities: typing.Sequence[str] = ["../data", "data", ".", "../../data", "../../../data"]
attempts: typing.List[str] = []
file_root: str = os.path.dirname(__file__)
for possibility in possibilities:
data_path: str = os.path.normpath(os.path.join(file_root, possibility))
attempts += [data_path]
try:
attempted_ids_path: str = os.path.normpath(os.path.join(data_path, "ids.json"))
with open(attempted_ids_path) as ids_file:
json.load(ids_file)
return data_path
        except Exception:
pass
raise Exception(f"Could not determine data path - ids.json not found in: {attempts}")
# # DATA_PATH
#
# This is the path to the data directory that contains (among other things) the ids.json.
#
DATA_PATH: str = _build_data_path()
# ## MangoConstants
#
# Load all Mango Markets' constants from its own `ids.json` file (retrieved from [GitHub](https://raw.githubusercontent.com/blockworks-foundation/mango-client-ts/main/src/ids.json)).
#
with open(os.path.join(DATA_PATH, "ids.json")) as json_file:
MangoConstants = json.load(json_file)
# # 🥭 PackageVersion class
#
# Runtime details of the current version of mango-explorer.
#
@dataclass
class PackageVersion:
version: str
last_commit: str
def __str__(self) -> str:
return f"« PackageVersion {self.version} - '{self.last_commit}' »"
def __repr__(self) -> str:
return f"{self}"
def version() -> PackageVersion:
package_version: str = "Unknown"
# The exception is deliberately trapped and ignored - we just want to return "Unknown" in that situation.
try: # nosemgrep
package_version = importlib.metadata.version("mango-explorer")
except Exception:
pass
version_filename: str = os.path.join(DATA_PATH, ".version")
last_commit = f"Unknown (no version file found at '{version_filename}')."
if os.path.isfile(version_filename):
with open(version_filename) as version_file:
last_commit = version_file.read().strip()
return PackageVersion(version=package_version, last_commit=last_commit)
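# Minimal usage sketch, assuming the data directory resolved above contains
# ids.json (and optionally a .version file): print the detected package version
# and the top-level keys of the Mango ids.json constants.
if __name__ == "__main__":
    print(version())
    print(sorted(MangoConstants.keys()))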
| 34.445783
| 460
| 0.727702
|
5a853e14b2e0afb0fd46b9ec512b1bfea4918f9e
| 35,603
|
py
|
Python
|
mlflow/pyfunc/__init__.py
|
dustindorroh/mlflow
|
1f29afea49c9a3aac0e9f34c59c86f23a6c6fccd
|
[
"Apache-2.0"
] | null | null | null |
mlflow/pyfunc/__init__.py
|
dustindorroh/mlflow
|
1f29afea49c9a3aac0e9f34c59c86f23a6c6fccd
|
[
"Apache-2.0"
] | null | null | null |
mlflow/pyfunc/__init__.py
|
dustindorroh/mlflow
|
1f29afea49c9a3aac0e9f34c59c86f23a6c6fccd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format <pyfunc-filesystem-format>`
for Python models and provides utilities for saving to and loading from this format. The format is
self-contained in the sense that it includes all necessary information for anyone to load it and
use it. Dependencies are either stored directly with the model or referenced via a Conda
environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory.
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
>>> tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
>>> cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-inference-api:
*************
Inference API
*************
The convention for pyfunc models is to have a ``predict`` method or function with the following
signature::
predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.Series | pandas.DataFrame]
This convention is relied on by other MLflow components.
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import logging
import numpy as np
import os
import pandas
import shutil
from copy import deepcopy
import mlflow
import mlflow.pyfunc.model
import mlflow.pyfunc.utils
from mlflow.models import Model
from mlflow.pyfunc.model import PythonModel, PythonModelContext, get_default_conda_env
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils import PYTHON_VERSION, deprecated, get_major_minor_py_version
from mlflow.utils.file_utils import TempDir, _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
"""
Add a ``pyfunc`` spec to the model configuration.
Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
flavor out of an existing directory structure. For example, other model flavors can use this to
specify how to use their output as a ``pyfunc``.
NOTE:
All paths are relative to the exported model root directory.
:param model: Existing model.
:param loader_module: The module to be used to load the model.
:param data: Path to the model data.
:param code: Path to the code dependencies.
:param env: Conda environment.
:param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
:return: Updated model configuration.
"""
parms = deepcopy(kwargs)
parms[MAIN] = loader_module
parms[PY_VERSION] = PYTHON_VERSION
if code:
parms[CODE] = code
if data:
parms[DATA] = data
if env:
parms[ENV] = env
return model.add_flavor(FLAVOR_NAME, **parms)
def _load_model_env(path):
"""
Get ENV file string from a model configuration stored in Python Function format.
Returned value is a model-relative path to a Conda Environment file,
or None if none was specified at model save time
"""
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def load_model(model_uri, suppress_warnings=True):
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
model_py_version = conf.get(PY_VERSION)
if not suppress_warnings:
_warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
if CODE in conf and conf[CODE]:
code_path = os.path.join(local_model_path, conf[CODE])
mlflow.pyfunc.utils._add_code_to_system_path(code_path=code_path)
data_path = os.path.join(local_model_path, conf[DATA]) if (DATA in conf) else local_model_path
return importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
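# For example, a logged model can be loaded back and evaluated on a pandas
# DataFrame (the run ID, artifact path and column name below are placeholders):
#
#     model = mlflow.pyfunc.load_model("runs:/<mlflow_run_id>/model")
#     predictions = model.predict(pandas.DataFrame({"x": [1.0, 2.0]}))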
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
"""
return load_model(model_uri, suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
"""
Compares the version of Python that was used to save a given model with the version
of Python that is currently running. If a major or minor version difference is detected,
logs an appropriate warning.
"""
if model_py_version is None:
_logger.warning(
"The specified model does not have a specified Python version. It may be"
" incompatible with the version of Python that is currently running: Python %s",
PYTHON_VERSION)
elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
_logger.warning(
"The version of Python that the model was saved in, `Python %s`, differs"
" from the version of Python that is currently running, `Python %s`,"
" and may be incompatible",
model_py_version, PYTHON_VERSION)
def spark_udf(spark, model_uri, result_type="double"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are
ordinals (0, 1, ...). On some versions of Spark, it is also possible to wrap the input in a
struct. In that case, the data will be passed as a DataFrame with column names given by the
    struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model will get the data as a
pandas DataFrame with 2 columns 'x' and 'y').
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the left most column with
matching type is returned.
>>> predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
>>> df.withColumn("prediction", predict("name", "age")).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import ArrayType, DataType
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
if not isinstance(result_type, DataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE)
with TempDir() as local_tmpdir:
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=local_tmpdir.path())
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
def predict(*args):
model = SparkModelCache.get_or_load(archive_path)
schema = {str(i): arg for i, arg in enumerate(args)}
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception("If passing a StructType column, there should be only one "
"input column, but got %d" % len(args))
pdf = x
if pdf is None:
# Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)
columns = [str(i) for i, _ in enumerate(args)]
pdf = pandas.DataFrame(schema, columns=columns)
result = model.predict(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elif type(elem_type) == IntegerType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort,
np.int32]).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, np.int, np.long])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series([row[1].values for row in result.iterrows()])
else:
return result[result.columns[0]]
return pandas_udf(predict, result_type)
def save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,
mlflow_model=Model(), python_model=None, artifacts=None, **kwargs):
"""
save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
mlflow_model=Model(), python_model=None, artifacts=None)
Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
local filesystem.
For information about the workflows that this method supports, please see :ref:`"workflows for
creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
:ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the
parameters for the first workflow: ``python_model``, ``artifacts``, cannot be
specified together.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. This describes the environment this model should
be run in. If ``python_model`` is not ``None``, the Conda environment must
at least specify the dependencies contained in
:func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the
model. The following is an *example* dictionary representation of a Conda
environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'cloudpickle==0.5.8'
]
}
:param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
**python_function** flavor.
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
"""
mlflow_model = kwargs.pop('model', mlflow_model)
if len(kwargs) > 0:
raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
if code_path is not None:
if not isinstance(code_path, list):
raise TypeError('Argument code_path should be a list, not {}'.format(type(code_path)))
first_argument_set = {
"loader_module": loader_module,
"data_path": data_path,
}
second_argument_set = {
"artifacts": artifacts,
"python_model": python_model,
}
first_argument_set_specified = any([item is not None for item in first_argument_set.values()])
second_argument_set_specified = any([item is not None for item in second_argument_set.values()])
if first_argument_set_specified and second_argument_set_specified:
raise MlflowException(
message=(
"The following sets of parameters cannot be specified together: {first_set_keys}"
" and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
" the following values: {first_set_entries} and {second_set_entries}".format(
first_set_keys=first_argument_set.keys(),
second_set_keys=second_argument_set.keys(),
first_set_entries=first_argument_set,
second_set_entries=second_argument_set)),
error_code=INVALID_PARAMETER_VALUE)
elif (loader_module is None) and (python_model is None):
raise MlflowException(
message="Either `loader_module` or `python_model` must be specified!",
error_code=INVALID_PARAMETER_VALUE)
if first_argument_set_specified:
return _save_model_with_loader_module_and_data_path(
path=path, loader_module=loader_module, data_path=data_path,
code_paths=code_path, conda_env=conda_env, mlflow_model=mlflow_model)
elif second_argument_set_specified:
return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
path=path, python_model=python_model, artifacts=artifacts, conda_env=conda_env,
code_paths=code_path, mlflow_model=mlflow_model)
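# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the ``python_model`` workflow accepted by save_model().
# The class, constant, and output path are assumptions for illustration only.
#
#   import mlflow.pyfunc
#
#   class AddN(mlflow.pyfunc.PythonModel):
#       def __init__(self, n):
#           self.n = n
#
#       def predict(self, context, model_input):
#           # model_input is a pandas.DataFrame; add a constant to every column
#           return model_input.apply(lambda column: column + self.n)
#
#   mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))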
def log_model(artifact_path, loader_module=None, data_path=None, code_path=None, conda_env=None,
python_model=None, artifacts=None, registered_model_name=None):
"""
Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
artifact for the current run.
For information about the workflows that this method supports, see :ref:`Workflows for
creating custom pyfunc models <pyfunc-create-custom-workflows>` and
:ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``
and the parameters for the first workflow: ``python_model``, ``artifacts`` together.
:param artifact_path: The run-relative artifact path to which to log the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. This describes the environment this model should
be run in. If ``python_model`` is not ``None``, the Conda environment must
at least specify the dependencies contained in
                      :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the
model. The following is an *example* dictionary representation of a Conda
environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'cloudpickle==0.5.8'
]
}
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param registered_model_name: Note:: Experimental: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
"""
return Model.log(artifact_path=artifact_path,
flavor=mlflow.pyfunc,
loader_module=loader_module,
data_path=data_path,
code_path=code_path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
registered_model_name=registered_model_name)
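# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of logging a custom pyfunc model inside an active run,
# reusing the hypothetical AddN class sketched after save_model() above.
#
#   import mlflow
#
#   with mlflow.start_run():
#       mlflow.pyfunc.log_model(artifact_path="model", python_model=AddN(n=5))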
def _save_model_with_loader_module_and_data_path(path, loader_module, data_path=None,
code_paths=None, conda_env=None,
mlflow_model=Model()):
"""
Export model as a generic Python function model.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``.
:param data_path: Path to a file or directory containing model data.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
this model should be run in.
:return: Model configuration containing model info.
"""
if os.path.exists(path):
raise MlflowException(
message="Path '{}' already exists".format(path),
error_code=RESOURCE_ALREADY_EXISTS)
os.makedirs(path)
code = None
data = None
env = None
if data_path is not None:
model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
data = model_file
if code_paths is not None:
for code_path in code_paths:
_copy_file_or_tree(src=code_path, dst=path, dst_dir="code")
code = "code"
if conda_env is not None:
shutil.copy(src=conda_env, dst=os.path.join(path, "mlflow_env.yml"))
env = "mlflow_env.yml"
mlflow.pyfunc.add_to_model(
mlflow_model, loader_module=loader_module, code=code, data=data, env=env)
mlflow_model.save(os.path.join(path, 'MLmodel'))
return mlflow_model
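# --- Illustrative loader module sketch (not part of the original module) ---
# The ``loader_module`` contract described above: a module exposing
# ``_load_pyfunc(data_path)`` that returns an object with a
# ``predict(pandas.DataFrame)`` method. The file name and the pickle-based
# persistence are assumptions for illustration only.
#
#   # my_loader.py
#   import pickle
#
#   def _load_pyfunc(data_path):
#       with open(data_path, "rb") as f:
#           return pickle.load(f)  # must expose .predict(pandas.DataFrame)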
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
| 47.534045
| 100
| 0.648204
|
e66d2f3f651d9d6a81bbea4bd356354fc6b91e4a
| 2,226
|
py
|
Python
|
gpytorch/lazy/constant_mul_lazy_variable.py
|
ediphy-dwild/gpytorch
|
559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a
|
[
"MIT"
] | null | null | null |
gpytorch/lazy/constant_mul_lazy_variable.py
|
ediphy-dwild/gpytorch
|
559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a
|
[
"MIT"
] | null | null | null |
gpytorch/lazy/constant_mul_lazy_variable.py
|
ediphy-dwild/gpytorch
|
559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import Variable
from .lazy_variable import LazyVariable
class ConstantMulLazyVariable(LazyVariable):
def __init__(self, lazy_var, constant):
if not isinstance(constant, Variable):
tensor_cls = lazy_var.tensor_cls
constant = Variable(tensor_cls(1).fill_(constant))
super(ConstantMulLazyVariable, self).__init__(lazy_var, constant)
self.lazy_var = lazy_var
self.constant = constant
def _matmul_closure_factory(self, *args):
lazy_var_closure = self.lazy_var._matmul_closure_factory(*args[:-1])
constant = args[-1]
def closure(rhs_mat):
res = lazy_var_closure(rhs_mat)
res = res * constant.expand_as(res)
return res
return closure
def _derivative_quadratic_form_factory(self, *args):
lazy_var_closure = self.lazy_var._derivative_quadratic_form_factory(*args[:-1])
constant = args[-1]
def closure(left_factor, right_factor):
res = list(lazy_var_closure(left_factor, right_factor))
for i, item in enumerate(res):
if torch.is_tensor(item) and res[i].sum():
res[i] = res[i] * constant.expand_as(res[i])
# Gradient with respect to the constant
res.append(left_factor.new(1).fill_((left_factor * right_factor).sum()))
return res
return closure
def _size(self):
return self.lazy_var.size()
def _transpose_nonbatch(self):
return ConstantMulLazyVariable(self.lazy_var._transpose_nonbatch(), self.constant)
def _batch_get_indices(self, batch_indices, left_indices, right_indices):
res = self.lazy_var._batch_get_indices(batch_indices, left_indices, right_indices)
return self.constant.expand_as(res) * res
def _get_indices(self, left_indices, right_indices):
res = self.lazy_var._get_indices(left_indices, right_indices)
return self.constant.expand_as(res) * res
def repeat(self, *sizes):
return ConstantMulLazyVariable(self.lazy_var.repeat(*sizes), self.constant)
def __getitem__(self, i):
return self.lazy_var.__getitem__(i) * self.constant
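# --- Illustrative usage sketch (not part of the original module) ---
# Wrapping an existing lazy variable so that every matrix product is scaled
# by a constant; ``some_lazy_var`` is a hypothetical LazyVariable instance,
# not an object defined in this file.
#
#   scaled = ConstantMulLazyVariable(some_lazy_var, 2.0)
#   # scaled's matmul closure returns 2.0 * (some_lazy_var matmul rhs)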
| 38.37931
| 90
| 0.674753
|
17ff4e5e6746e41e0c378b868f45c30a76e22378
| 1,341
|
py
|
Python
|
sami/nodes/own.py
|
sami-dca/Sami
|
796f73500bb4ee36a7023653d47d300e04a10217
|
[
"MIT"
] | 1
|
2020-11-14T10:52:33.000Z
|
2020-11-14T10:52:33.000Z
|
sami/nodes/own.py
|
sami-dca/Sami
|
796f73500bb4ee36a7023653d47d300e04a10217
|
[
"MIT"
] | null | null | null |
sami/nodes/own.py
|
sami-dca/Sami
|
796f73500bb4ee36a7023653d47d300e04a10217
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional
from ._base import Node
from ..utils import get_id
from ..design import Singleton
from ..config import Identifier
from ..cryptography.asymmetric import PrivateKey
class MasterNode(Node, Singleton):
def __init__(self, private_key: PrivateKey):
self.private_key: PrivateKey = private_key
self.public_key = self.private_key.get_public_key()
self._hash = self.public_key.get_public_key_hash()
self.sig = self._compute_own_sig()
super().__init__(self.public_key, self.sig)
def _compute_own_sig(self) -> str:
return self.private_key.get_signature(self._hash)
def _compute_id(self) -> Identifier:
return get_id(self._hash)
def export_private_key(self, directory: Path,
passphrase: Optional[str] = None) -> None:
file_path = directory / f"rsa_private_key-{self.id}.pem"
self.private_key._to_file(file_path, passphrase)
def is_private_key_loaded() -> bool:
"""
Checks whether the private key has been loaded.
    If this method returns False, then private databases and cryptographic
operations won't work.
"""
try:
MasterNode()
except TypeError:
# Catch "missing argument" errors
return False
else:
return True
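# --- Illustrative usage sketch (not part of the original module) ---
# Intended flow: construct the MasterNode singleton from a loaded private
# key, after which is_private_key_loaded() reports True. How ``private_key``
# is obtained is an assumption left to the surrounding code base.
#
#   master = MasterNode(private_key)   # private_key: a PrivateKey instance
#   assert is_private_key_loaded()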
| 29.8
| 75
| 0.685309
|
7a37d158fe1e53385aa28aed8bc81f9a1c34df05
| 2,093
|
py
|
Python
|
bmtrain/distributed/ops.py
|
zh-zheng/BMTrain
|
256b0835af8f2da6014ad0ebfcc22acc01a6f8ec
|
[
"Apache-2.0"
] | null | null | null |
bmtrain/distributed/ops.py
|
zh-zheng/BMTrain
|
256b0835af8f2da6014ad0ebfcc22acc01a6f8ec
|
[
"Apache-2.0"
] | null | null | null |
bmtrain/distributed/ops.py
|
zh-zheng/BMTrain
|
256b0835af8f2da6014ad0ebfcc22acc01a6f8ec
|
[
"Apache-2.0"
] | null | null | null |
import torch
from ..global_var import config
from ..nccl import allGather as ncclAllGather
from ..nccl import allReduce as ncclAllReduce
class OpAllGather(torch.autograd.Function):
@staticmethod
def forward(ctx, input : torch.Tensor):
if not input.contiguous():
input = input.contiguous()
output = torch.empty( (config['world_size'],) + input.size(), dtype=input.dtype, device=input.device)
offs = input.storage_offset()
ncclAllGather(
input.storage()[offs: offs + input.numel()],
output.storage(),
config['comm']
)
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output[config['rank']]
def all_gather(x : torch.Tensor):
assert x.is_cuda
return OpAllGather.apply(x)
class OpAllReduce(torch.autograd.Function):
@staticmethod
def forward(ctx, input : torch.Tensor, op : str):
if not input.contiguous():
input = input.contiguous()
output = torch.empty( input.size(), dtype=input.dtype, device=input.device)
offs = input.storage_offset()
ncclAllReduce(
input.storage()[offs: offs + input.numel()],
output.storage(),
op,
config['comm']
)
ctx.op = op
if op in ["sum", "avg"]:
pass
elif op in ["max", "min"]:
ctx.save_for_backward( input != output )
else:
ctx.save_for_backward( output / input )
return output
@staticmethod
def backward(ctx, grad_output):
if ctx.op == "sum":
return grad_output, None
elif ctx.op == "avg":
return grad_output / config['world_size'], None
elif ctx.op in ["max", "min"]:
return torch.masked_fill(grad_output, ctx.saved_tensors[0], 0), None
else:
return grad_output * ctx.saved_tensors[0], None
def all_reduce(x : torch.Tensor, op : str = "sum"):
assert x.is_cuda
return OpAllReduce.apply(x, op)
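# --- Illustrative usage sketch (not part of the original module) ---
# Using the collective wrappers above, assuming bmtrain has been initialised
# (so config['comm'], config['rank'] and config['world_size'] are populated)
# and the tensor lives on the GPU.
#
#   x = torch.ones(4, device="cuda")
#   gathered = all_gather(x)           # shape: (world_size, 4)
#   total = all_reduce(x, op="sum")    # elementwise sum across all ranks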
| 29.069444
| 109
| 0.582895
|
584e62c366d8a28054384b898b87152145dc1dcc
| 1,852
|
py
|
Python
|
anthill/message/options.py
|
anthill-platform/anthill-message
|
847a766d24c53f642f8a08f55a7a2c8f03b69f51
|
[
"MIT"
] | null | null | null |
anthill/message/options.py
|
anthill-platform/anthill-message
|
847a766d24c53f642f8a08f55a7a2c8f03b69f51
|
[
"MIT"
] | null | null | null |
anthill/message/options.py
|
anthill-platform/anthill-message
|
847a766d24c53f642f8a08f55a7a2c8f03b69f51
|
[
"MIT"
] | null | null | null |
from anthill.common.options import define
# Main
define("host",
default="http://localhost:9511",
help="Public hostname of this service",
type=str)
define("listen",
default="port:9511",
help="Socket to listen. Could be a port number (port:N), or a unix domain socket (unix:PATH)",
type=str)
define("name",
default="message",
       help="Service short name. Used by the discovery service to discover this service.",
type=str)
# MySQL database
define("db_host",
default="127.0.0.1",
type=str,
help="MySQL database location")
define("db_username",
default="root",
type=str,
help="MySQL account username")
define("db_password",
default="",
type=str,
help="MySQL account password")
define("db_name",
default="dev_message",
type=str,
help="MySQL database name")
# Messaging
define("message_broker",
default="amqp://guest:guest@127.0.0.1:5672/",
help="RabbitMQ broker location for messaging (amqp).",
group="message",
type=str)
define("message_broker_max_connections",
default=10,
help="Maximum connections to maintain.",
group="message",
type=int)
define("group_cluster_size",
default=1000,
type=int,
group="groups",
help="Cluster size to group users groups around")
define("message_incoming_queue_name",
default="message.incoming.queue",
help="RabbitMQ incoming queue name.",
group="message",
type=str)
define("message_prefetch_count",
default=32,
type=int,
group="message",
help="How much of messages can be prefetch")
define("outgoing_message_workers",
default=32,
type=int,
group="message",
help="How much workers process the outgoing messages")
| 23.15
| 101
| 0.62095
|
e7f276b227cc71788c467b1fe7fe702adb604b62
| 248
|
py
|
Python
|
app/tests/conftest.py
|
ribery77/Josh_Task
|
d4a2e42176d3fe11273fc964de7af37f93d1b2d6
|
[
"Apache-2.0"
] | null | null | null |
app/tests/conftest.py
|
ribery77/Josh_Task
|
d4a2e42176d3fe11273fc964de7af37f93d1b2d6
|
[
"Apache-2.0"
] | null | null | null |
app/tests/conftest.py
|
ribery77/Josh_Task
|
d4a2e42176d3fe11273fc964de7af37f93d1b2d6
|
[
"Apache-2.0"
] | null | null | null |
"""
conftest.py: It contents fixture functions used in tests.
"""
from app import create_app
import pytest
@pytest.fixture
def app():
app = create_app("testing")
return app
@pytest.fixture
def client(app):
return app.test_client()
| 13.777778
| 57
| 0.705645
|
b4ff7be897ddfb1e2f4fd68b4b8d170181690126
| 935
|
py
|
Python
|
tests/test_similarity.py
|
JohnlNguyen/multihead-siamese-nets
|
d6f2e67bae3858da6164b36e4889eadc800b9b1b
|
[
"MIT"
] | 2
|
2019-05-24T08:52:02.000Z
|
2022-01-10T21:38:08.000Z
|
tests/test_similarity.py
|
JohnlNguyen/multihead-siamese-nets
|
d6f2e67bae3858da6164b36e4889eadc800b9b1b
|
[
"MIT"
] | 10
|
2020-01-28T22:06:21.000Z
|
2022-02-10T00:16:53.000Z
|
tests/test_similarity.py
|
warisqr007/ConvSANN
|
0cede14601ce1a62bd58abf92a04ad3d7cc3be99
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from models.lstm import manhattan_similarity
class TestSimilarity(tf.test.TestCase):
def testManhattanSimilaritySame(self):
with self.test_session() as test_session:
x1 = np.array([[1., 1.]])
x2 = np.array([[1., 1.]])
siamese_lstm_model = manhattan_similarity(x1, x2)
actual_output = test_session.run(siamese_lstm_model)
correct_output = [1.]
self.assertEqual(actual_output, correct_output)
def testSimilarity2D(self):
with self.test_session() as test_session:
x1 = np.array([[1., 1.], [1., 1.]])
x2 = np.array([[1., 1.], [1., 1.]])
siamese_lstm_model = manhattan_similarity(x1, x2)
actual_output = test_session.run(siamese_lstm_model)
correct_output = [[1.], [1.]]
self.assertAllEqual(actual_output, correct_output)
| 33.392857
| 64
| 0.614973
|
ad4105a07a4225a3c238cf2635bec62d420bc657
| 16,977
|
py
|
Python
|
tasks/views.py
|
418sec/Django-CRM
|
8ed5e411844a74c7757f3f54374740eef3a4317c
|
[
"MIT"
] | 1
|
2020-05-03T13:29:06.000Z
|
2020-05-03T13:29:06.000Z
|
tasks/views.py
|
anandtiwarics/Django-CRM
|
a90d30217bf9b4b896a7ebe994382e96956181a1
|
[
"MIT"
] | 1
|
2020-03-27T17:02:11.000Z
|
2020-04-03T17:21:24.000Z
|
tasks/views.py
|
anandtiwarics/Django-CRM
|
a90d30217bf9b4b896a7ebe994382e96956181a1
|
[
"MIT"
] | 5
|
2020-03-21T09:55:05.000Z
|
2020-04-03T06:51:02.000Z
|
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
TemplateView, UpdateView, View)
from accounts.models import Account
from common.access_decorators_mixins import (MarketingAccessRequiredMixin,
SalesAccessRequiredMixin, marketing_access_required, sales_access_required)
from common.models import Attachments, Comment, User
from common.tasks import send_email_user_mentions
from contacts.models import Contact
from tasks.celery_tasks import send_email
from tasks.forms import TaskAttachmentForm, TaskCommentForm, TaskForm
from tasks.models import Task
from tasks.utils import *
from teams.models import Teams
@login_required
def get_teams_and_users(request):
data = {}
teams = Teams.objects.all()
teams_data = [{'team': team.id, 'users': [
user.id for user in team.users.all()]} for team in teams]
users = User.objects.all().values_list("id", flat=True)
data['teams'] = teams_data
data['users'] = list(users)
return JsonResponse(data)
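# --- Illustrative response shape (not part of the original module) ---
# get_teams_and_users() returns JSON of the following form; the ids are
# assumptions for illustration only.
#
#   {"teams": [{"team": 1, "users": [2, 3]}], "users": [1, 2, 3]}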
@login_required
@sales_access_required
def tasks_list(request):
if request.method == 'GET':
if request.user.role == 'ADMIN' or request.user.is_superuser:
tasks = Task.objects.all().distinct().order_by('-created_on')
else:
tasks = Task.objects.filter(
Q(created_by=request.user) | Q(assigned_to=request.user)).distinct().order_by('-created_on')
today = datetime.today().date()
return render(request, 'tasks_tasks_list.html', {'tasks': tasks, 'today': today, 'status_choices': STATUS_CHOICES, 'priority_choices': PRIORITY_CHOICES})
if request.method == 'POST':
tasks = Task.objects.filter().order_by('-created_on')
if request.user.role == 'ADMIN' or request.user.is_superuser:
tasks = tasks
else:
tasks = Task.objects.filter(created_by=request.user)
if request.POST.get('task_title', None):
tasks = tasks.filter(
title__icontains=request.POST.get('task_title'))
if request.POST.get('status', None):
tasks = tasks.filter(status=request.POST.get('status'))
if request.POST.get('priority', None):
tasks = tasks.filter(priority=request.POST.get('priority'))
tasks = tasks.distinct()
today = datetime.today().date()
return render(request, 'tasks_tasks_list.html', {'tasks': tasks, 'today': today, 'status_choices': STATUS_CHOICES, 'priority_choices': PRIORITY_CHOICES})
@login_required
@sales_access_required
def task_create(request):
if request.method == 'GET':
if request.user.role == 'ADMIN' or request.user.is_superuser:
users = User.objects.filter(is_active=True).order_by('email')
accounts = Account.objects.filter(status="open")
# elif request.user.google.all():
# users = []
# accounts = Account.objects.filter(created_by=request.user).filter(status="open")
else:
users = User.objects.filter(role='ADMIN').order_by('email')
accounts = Account.objects.filter(Q(created_by=request.user) | Q(
assigned_to__in=[request.user])).filter(status="open")
form = TaskForm(request_user=request.user)
return render(request, 'task_create.html', {'form': form, 'users': users, 'accounts': accounts,
"teams": Teams.objects.all(),
})
if request.method == 'POST':
form = TaskForm(request.POST, request_user=request.user)
if form.is_valid():
task = form.save(commit=False)
task.created_by = request.user
task.save()
task.assigned_to.add(*request.POST.getlist('assigned_to'))
task.contacts.add(*request.POST.getlist('contacts'))
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist(
'teams')).values_list('users', flat=True)
assinged_to_users_ids = task.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
task.assigned_to.add(user_id)
if request.POST.getlist('teams', []):
task.teams.add(*request.POST.getlist('teams'))
kwargs = {'domain': request.get_host(), 'protocol': request.scheme}
assigned_to_list = list(
task.assigned_to.all().values_list('id', flat=True))
send_email.delay(task.id, assigned_to_list, **kwargs)
success_url = reverse('tasks:tasks_list')
if request.POST.get('from_account'):
success_url = reverse('accounts:view_account', args=(
request.POST.get('from_account'),))
return JsonResponse({'error': False, 'success_url': success_url})
else:
return JsonResponse({'error': True, 'errors': form.errors})
@login_required
@sales_access_required
def task_detail(request, task_id):
task = get_object_or_404(Task, pk=task_id)
delete_task = (request.user == task.created_by) or (
request.user.role == 'ADMIN')
edit_or_view = (
delete_task or request.user.has_sales_access or request.user in task.assigned_to.all())
user_assigned_account = False
user_assigned_accounts = set(
request.user.account_assigned_users.values_list('id', flat=True))
if task.account:
task_accounts = set([task.account.id])
else:
task_accounts = set()
if user_assigned_accounts.intersection(task_accounts):
user_assigned_account = True
if not ((request.user.role == 'ADMIN') or
(request.user.is_superuser) or
(task.created_by == request.user) or
(request.user in task.assigned_to.all()) or
user_assigned_account):
raise PermissionDenied
if request.method == 'GET':
# if Task.objects.filter(id=task_id).exists():
# task = Task.objects.select_related('account').prefetch_related(
# 'assigned_to', 'contacts').get(id=task_id)
attachments = task.tasks_attachment.all()
comments = task.tasks_comments.all()
if request.user.is_superuser or request.user.role == 'ADMIN':
users_mention = list(User.objects.filter(
is_active=True).values('username'))
elif request.user != task.created_by:
users_mention = [{'username': task.created_by.username}]
else:
users_mention = list(task.assigned_to.all().values('username'))
return render(request, 'task_detail.html',
{'task': task, 'users_mention': users_mention,
'attachments': attachments, 'comments': comments,
'delete_task': delete_task, 'edit_or_view': edit_or_view})
@login_required
@sales_access_required
def task_edit(request, task_id):
task_obj = get_object_or_404(Task, pk=task_id)
accounts = Account.objects.filter(status="open")
if not (request.user.role == 'ADMIN' or request.user.is_superuser or task_obj.created_by == request.user or request.user in task_obj.assigned_to.all()):
raise PermissionDenied
if request.method == 'GET':
if request.user.role == 'ADMIN' or request.user.is_superuser:
users = User.objects.filter(is_active=True).order_by('email')
elif request.user.google.all():
users = []
else:
users = User.objects.filter(role='ADMIN').order_by('email')
# form = TaskForm(request_user=request.user)
form = TaskForm(instance=task_obj, request_user=request.user)
return render(request, 'task_create.html', {'form': form, 'task_obj': task_obj,
'users': users, 'accounts': accounts, "teams": Teams.objects.all(), })
if request.method == 'POST':
form = TaskForm(request.POST, instance=task_obj,
request_user=request.user)
if form.is_valid():
task = form.save(commit=False)
previous_assigned_to_users = list(
task_obj.assigned_to.all().values_list('id', flat=True))
task.save()
form.save_m2m()
# task.assigned_to.clear()
# task.contacts.clear()
# task.assigned_to.add(*request.POST.getlist('assigned_to'))
# task.contacts.add(*request.POST.getlist('contacts'))
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist(
'teams')).values_list('users', flat=True)
assinged_to_users_ids = task.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
task.assigned_to.add(user_id)
if request.POST.getlist('teams', []):
task.teams.clear()
task.teams.add(*request.POST.getlist('teams'))
else:
task.teams.clear()
kwargs = {'domain': request.get_host(), 'protocol': request.scheme}
assigned_to_list = list(
task.assigned_to.all().values_list('id', flat=True))
recipients = list(set(assigned_to_list) -
set(previous_assigned_to_users))
send_email.delay(task.id, recipients, **kwargs)
success_url = reverse('tasks:tasks_list')
if request.POST.get('from_account'):
success_url = reverse('accounts:view_account', args=(
request.POST.get('from_account'),))
return JsonResponse({'error': False, 'success_url': success_url})
else:
return JsonResponse({'error': True, 'errors': form.errors})
@login_required
@sales_access_required
def task_delete(request):
# task_obj = get_object_or_404(Task, pk=task_id)
task_obj = get_object_or_404(
Task, id=request.POST.get("task_id"))
if not (request.user.role == 'ADMIN' or request.user.is_superuser or task_obj.created_by == request.user):
raise PermissionDenied
if request.method == 'GET':
# task_obj.delete()
if request.GET.get('view_account', None):
return redirect(reverse('accounts:view_account', args=(request.GET.get('view_account'),)))
return redirect('tasks:tasks_list')
if request.method == 'POST':
task_obj.delete()
if request.GET.get('page'):
return redirect(reverse('accounts:view_account', args=(request.GET.get('view_account'),)))
return redirect('tasks:tasks_list')
class AddCommentView(LoginRequiredMixin, CreateView):
model = Comment
form_class = TaskCommentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.task = get_object_or_404(
Task, id=request.POST.get('task_id'))
if (
request.user == self.task.created_by or request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to comment for this account."}
return JsonResponse(data)
def form_valid(self, form):
comment = form.save(commit=False)
comment.commented_by = self.request.user
comment.task = self.task
comment.save()
comment_id = comment.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'tasks', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": comment.id, "comment": comment.comment,
"commented_on": comment.commented_on,
"commented_on_arrow": comment.commented_on_arrow,
"commented_by": comment.commented_by.email
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.comment_obj = get_object_or_404(
Comment, id=request.POST.get("commentid"))
if request.user == self.comment_obj.commented_by:
form = TaskCommentForm(request.POST, instance=self.comment_obj)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {'error': "You don't have permission to edit this comment."}
return JsonResponse(data)
def form_valid(self, form):
self.comment_obj.comment = form.cleaned_data.get("comment")
self.comment_obj.save(update_fields=["comment"])
comment_id = self.comment_obj.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'tasks', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": self.comment_obj.id,
"comment": self.comment_obj.comment,
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Comment, id=request.POST.get("comment_id"))
if request.user == self.object.commented_by:
self.object.delete()
data = {"cid": request.POST.get("comment_id")}
return JsonResponse(data)
data = {'error': "You don't have permission to delete this comment."}
return JsonResponse(data)
class AddAttachmentView(LoginRequiredMixin, CreateView):
model = Attachments
form_class = TaskAttachmentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.task = get_object_or_404(
Task, id=request.POST.get('task_id'))
if (
request.user == self.task.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to add attachment \
for this account."}
return JsonResponse(data)
def form_valid(self, form):
attachment = form.save(commit=False)
attachment.created_by = self.request.user
attachment.file_name = attachment.attachment.name
attachment.task = self.task
attachment.save()
return JsonResponse({
"attachment_id": attachment.id,
"attachment": attachment.file_name,
"attachment_url": attachment.attachment.url,
"download_url": reverse('common:download_attachment',
kwargs={'pk': attachment.id}),
"attachment_display": attachment.get_file_type_display(),
"created_on": attachment.created_on,
"created_on_arrow": attachment.created_on_arrow,
"created_by": attachment.created_by.email,
"file_type": attachment.file_type()
})
def form_invalid(self, form):
return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Attachments, id=request.POST.get("attachment_id"))
if (
request.user == self.object.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
self.object.delete()
data = {"acd": request.POST.get("attachment_id")}
return JsonResponse(data)
data = {
'error': "You don't have permission to delete this attachment."}
return JsonResponse(data)
| 40.810096
| 161
| 0.619191
|
ddd1f132936fbc41aaf22a86cecec1efb8eaaab8
| 1,142
|
py
|
Python
|
volt/openstack/common/rpc/zmq_receiver.py
|
vmthunder/volt
|
df6e2c3820c424b5144950d07cce053ee401cff6
|
[
"Apache-2.0"
] | 2
|
2015-03-15T11:12:53.000Z
|
2018-10-12T03:05:52.000Z
|
volt/volt/openstack/common/rpc/zmq_receiver.py
|
vmthunder/packages
|
e530e243007a0f403cad1b67a490ffb9687969c3
|
[
"Apache-2.0"
] | null | null | null |
volt/volt/openstack/common/rpc/zmq_receiver.py
|
vmthunder/packages
|
e530e243007a0f403cad1b67a490ffb9687969c3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from volt.openstack.common import log as logging
from volt.openstack.common import rpc
from volt.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
| 29.282051
| 78
| 0.740806
|
0fb39b50c8d4c05e70b02806f718451b7ec4adbc
| 18,081
|
py
|
Python
|
visualize_co.py
|
gyungchan2110/Mask_RCNN
|
dc9967c6dd20c462624a6c45cca65dd9a32b4a71
|
[
"MIT"
] | null | null | null |
visualize_co.py
|
gyungchan2110/Mask_RCNN
|
dc9967c6dd20c462624a6c45cca65dd9a32b4a71
|
[
"MIT"
] | null | null | null |
visualize_co.py
|
gyungchan2110/Mask_RCNN
|
dc9967c6dd20c462624a6c45cca65dd9a32b4a71
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import random
import itertools
import colorsys
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
import IPython.display
import utils
############################################################
# Visualization
############################################################
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
interpolation=None):
"""Display the given set of images, optionally with titles.
images: list or array of image tensors in HWC format.
titles: optional. A list of titles to display with each image.
cols: number of images per row
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
    interpolation: Optional. Image interpolation to use for display.
"""
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(14, 14 * rows // cols))
i = 1
for image, title in zip(images, titles):
plt.subplot(rows, cols, i)
plt.title(title, fontsize=9)
plt.axis('off')
plt.imshow(image.astype(np.uint8), cmap=cmap,
norm=norm, interpolation=interpolation)
i += 1
plt.show()
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
#random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(1):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
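# --- Worked example (not part of the original module) ---
# With alpha=0.5 and a colour channel value of 1.0, a masked pixel intensity
# of 100 becomes 100 * 0.5 + 0.5 * 1.0 * 255 = 177.5, i.e. halfway between
# the original intensity and the mask colour. Note that this variant only
# blends channel 0, because the loop runs over range(1).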
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None, dstPath = None, filename = None, truemask = None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
"""
#scores_ = np.asarray(scores)
# Number of instances
scores = np.asarray(scores)
maxvalue = np.amax(scores)
indece = []
for i, score in enumerate(scores) :
if (score == maxvalue):
indece.append(i)
indece = np.asarray(indece)
indece_tmp = []
index = -1
tmp = 1024
if(len(indece) == 1):
index = indece[0]
else:
for i in indece :
            y1, x1, y2, x2 = boxes[i]
if(x1 < 512 and y1 < tmp):
tmp = y1
indece_tmp.append(i)
tmp = 1024
if(len(indece_tmp) == 1):
index = indece_tmp[0]
else:
for i in indece_tmp :
                y1, x1, y2, x2 = boxes[i]
if(x1 < tmp):
tmp = x1
index = i
print("index", index)
#assert index < N and index >= 0
# if not N:
# print("\n*** No instances to display *** \n")
# else:
# assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
fig = plt.figure()
if not ax:
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# Generate random colors
colors = random_colors(2)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
#for index in range(N):
color = colors[0]
#if not np.any(boxes[index]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
#continue
y1, x1, y2, x2 = boxes[index]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=0.5,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
class_id = class_ids[index]
score = scores[i] if scores is not None else None
label = class_names[class_id]
x = random.randint(x1, (x1 + x2) // 2)
#caption = "{} {:.3f}".format(label, score) if score else label
#ax.text(x1, y1 + 8, caption,
# color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, index]
masked_image = apply_mask(masked_image, mask, color)
#truemask = np.asarray(truemask)
#color = colors[1]
#masked_image = apply_mask(masked_image, truemask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
padded_mask = np.zeros(
(truemask.shape[0] + 2, truemask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = truemask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=colors[1])
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
#ax.imshow(truemask, cmap='Blues', alpha = 0.5, interpolation = 'nearest')
plt.show()
fig.savefig(dstPath+"/"+filename[:-4] + "_fig.png", dpi = 1024)
def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
"""
anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
proposals: [n, 4] the same anchors but refined to fit objects better.
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
ids = np.arange(rois.shape[0], dtype=np.int32)
ids = np.random.choice(
ids, limit, replace=False) if ids.shape[0] > limit else ids
fig, ax = plt.subplots(1, figsize=(12, 12))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
ax.axis('off')
for i, id in enumerate(ids):
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
if class_id:
ry1, rx1, ry2, rx2 = refined_rois[id]
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
ax.text(rx1, ry1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
# Mask
m = utils.unmold_mask(mask[id], rois[id]
[:4].astype(np.int32), image.shape)
masked_image = apply_mask(masked_image, m, color)
ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 2, x1:x2] = color
image[y2:y2 + 2, x1:x2] = color
image[y1:y2, x1:x1 + 2] = color
image[y1:y2, x2:x2 + 2] = color
return image
def display_top_masks(image, mask, class_ids, class_names, limit=4):
"""Display the given image and the top few class masks."""
to_display = []
titles = []
to_display.append(image)
titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
# Pick top prominent classes in this image
unique_class_ids = np.unique(class_ids)
mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
for i in unique_class_ids]
top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
key=lambda r: r[1], reverse=True) if v[1] > 0]
# Generate images and titles
for i in range(limit):
class_id = top_ids[i] if i < len(top_ids) else -1
# Pull masks of instances belonging to the same class.
m = mask[:, :, np.where(class_ids == class_id)[0]]
m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)
to_display.append(m)
titles.append(class_names[class_id] if class_id != -1 else "-")
display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r")
def plot_precision_recall(AP, precisions, recalls):
"""Draw the precision-recall curve.
AP: Average precision at IoU >= 0.5
precisions: list of precision values
recalls: list of recall values
"""
# Plot the Precision-Recall curve
_, ax = plt.subplots(1)
ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
ax.set_ylim(0, 1.1)
ax.set_xlim(0, 1.1)
_ = ax.plot(recalls, precisions)
def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
overlaps, class_names, threshold=0.5):
"""Draw a grid showing how ground truth objects are classified.
gt_class_ids: [N] int. Ground truth class IDs
pred_class_id: [N] int. Predicted class IDs
pred_scores: [N] float. The probability scores of predicted classes
    overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
class_names: list of all class names in the dataset
threshold: Float. The prediction probability required to predict a class
"""
gt_class_ids = gt_class_ids[gt_class_ids != 0]
pred_class_ids = pred_class_ids[pred_class_ids != 0]
plt.figure(figsize=(12, 10))
plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
plt.yticks(np.arange(len(pred_class_ids)),
["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
for i, id in enumerate(pred_class_ids)])
plt.xticks(np.arange(len(gt_class_ids)),
[class_names[int(id)] for id in gt_class_ids], rotation=90)
thresh = overlaps.max() / 2.
for i, j in itertools.product(range(overlaps.shape[0]),
range(overlaps.shape[1])):
text = ""
if overlaps[i, j] > threshold:
text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
color = ("white" if overlaps[i, j] > thresh
else "black" if overlaps[i, j] > 0
else "grey")
plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
horizontalalignment="center", verticalalignment="center",
fontsize=9, color=color)
plt.tight_layout()
plt.xlabel("Ground Truth")
plt.ylabel("Predictions")
def draw_boxes(image, boxes=None, refined_boxes=None,
               masks=None, captions=None, visibilities=None,
               title="", ax=None):
    """Draw bounding boxes and segmentation masks with different
customizations.
boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
refined_boxes: Like boxes, but draw with solid lines to show
that they're the result of refining 'boxes'.
masks: [N, height, width]
captions: List of N titles to display on each box
visibilities: (optional) List of values of 0, 1, or 2. Determine how
                  prominent each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
"""
# Number of boxes
assert boxes is not None or refined_boxes is not None
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(12, 12))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
x = random.randint(x1, (x1 + x2) // 2)
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
# Masks
if masks is not None:
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
def display_table(table):
"""Display values in a table format.
table: an iterable of rows, and each row is an iterable of values.
"""
html = ""
for row in table:
row_html = ""
for col in row:
row_html += "<td>{:40}</td>".format(str(col))
html += "<tr>" + row_html + "</tr>"
html = "<table>" + html + "</table>"
IPython.display.display(IPython.display.HTML(html))
def display_weight_stats(model):
"""Scans all the weights in the model and returns a list of tuples
that contain stats about each weight.
"""
layers = model.get_trainable_layers()
table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
for l in layers:
weight_values = l.get_weights() # list of Numpy arrays
weight_tensors = l.weights # list of TF tensors
for i, w in enumerate(weight_values):
weight_name = weight_tensors[i].name
# Detect problematic layers. Exclude biases of conv layers.
alert = ""
if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
alert += "<span style='color:red'>*** dead?</span>"
if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
alert += "<span style='color:red'>*** Overflow?</span>"
# Add row
table.append([
weight_name + alert,
str(w.shape),
"{:+9.4f}".format(w.min()),
"{:+10.4f}".format(w.max()),
"{:+9.4f}".format(w.std()),
])
display_table(table)
| 36.527273
| 99
| 0.572811
|
a6d22c27cc3b7abd2327a30bd8820a48512eaf32
| 10,682
|
py
|
Python
|
bullets.py
|
crisdesivo/Space-Wizard
|
fa13f91ab30c6ee26b1d1893a9ab208e3365768d
|
[
"Apache-2.0"
] | null | null | null |
bullets.py
|
crisdesivo/Space-Wizard
|
fa13f91ab30c6ee26b1d1893a9ab208e3365768d
|
[
"Apache-2.0"
] | null | null | null |
bullets.py
|
crisdesivo/Space-Wizard
|
fa13f91ab30c6ee26b1d1893a9ab208e3365768d
|
[
"Apache-2.0"
] | null | null | null |
from constants import *
from kivy.clock import Clock
from kivy.uix.image import Image
from random import randint
from math import *
from kivy.properties import NumericProperty
def attack_instance(attack, pos, directionX, directionY):
return attack["pattern"](pos=pos, attack=attack, directionX=directionX, directionY=directionY)
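# --- Illustrative usage sketch (not part of the original module) ---
# Spawning one of the attack dictionaries defined below; the position and
# direction values are assumptions for illustration only.
#
#   bullet = attack_instance(ice_star, pos=(100, 100), directionX=1, directionY=0)
#   # bullet is a Star widget, ready to be added to a parent and updated each frame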
def remove_widget(widget):
parent=widget.parent
parent.remove_widget(widget)
class RotatedImage(Image):
angle = NumericProperty()
class Sprite(RotatedImage):
def __init__(self, **kwargs):
super(Sprite, self).__init__(**kwargs)
self.allow_stretch=True
self.texture.mag_filter='nearest'
self.size = self.texture.size
class Bullet(Sprite):
def __init__(self, **kwargs):
super(Bullet, self).__init__(**kwargs)
self.avoid=[]
self.poison=0
def hit(self,enemy):
return enemy.collide_point(self.center_x,self.center_y)
def make_hit(self,enemy):
enemy.take_hit(self)
remove_widget(self)
def out_of_range(self):
return self.x>Window.width or self.x<0 or self.y<-Window.height or self.y>2*Window.height
class Star(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Star, self).__init__(source=attack["image_source"],pos=pos)
self.x_velocity=randint(attack["min_x_speed"], attack["max_x_speed"])
self.y_velocity=randint(-attack["max_y_speed"], attack["max_y_speed"])
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.axis1=0
self.axis2=0
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
def update(self):
self.axis1=self.x_velocity
self.axis2=self.y_velocity
self.x+=self.axis1*self.directionX + self.axis2*self.directionY
self.y+=self.axis1*self.directionY - self.axis2*self.directionX
if self.out_of_range():
self.parent.remove_widget(self)
ice_star={
"damage":1,
"cooldown": 5,
"min_x_speed": 10,
"max_x_speed": 20,
"max_y_speed": 1,
"image_source": 'atlas://images/Master484/cyan_star',
"type": "ice",
"pattern": Star
}
class Bomb(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Bomb, self).__init__(source=attack["image_source"],pos=pos)
self.attack=attack
self.velocity=attack["velocity"]
self.y_velocity=0
self.starting_y_speed=attack["y_speed"]
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.axis1=0
self.axis2=0
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
def update(self):
self.axis1=self.velocity
self.axis2=self.y_velocity
self.x+=self.axis1*self.directionX + self.axis2*self.directionY
self.y+=self.axis1*self.directionY - self.axis2*self.directionX
if self.out_of_range():
self.parent.remove_widget(self)
def make_hit(self, enemy):
enemy.take_hit(self)
if self.damage>=1:
parent=self.parent
            bomb1=attack_instance(pos=self.pos, attack=self.attack, directionX=self.directionX, directionY=self.directionY)
bomb1.damage=self.damage/2.0
bomb1.y_velocity=self.starting_y_speed
bomb1.avoid=self.avoid
bomb1.avoid.append(enemy)
            bomb2=attack_instance(pos=self.pos, attack=self.attack, directionX=self.directionX, directionY=self.directionY)
bomb2.damage=self.damage/2.0
bomb2.y_velocity=-self.starting_y_speed
bomb2.avoid=self.avoid
bomb2.avoid.append(enemy)
parent.add_widget(bomb1)
parent.add_widget(bomb2)
parent.remove_widget(self)
else:
remove_widget(self)
fire_bomb={
"damage":10,
"velocity":5,
"cooldown":30,
"y_speed": 2,
"image_source": "atlas://images/Master484/red_bomb",
"pattern": Bomb
}
class Random(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Random,self).__init__(source=attack["image_source"],pos=pos)
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.velocity=attack["velocity"]
self.axis1=0
self.axis2=0
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
def update(self):
self.axis1=randint(0,self.velocity)
self.axis2=randint(-self.velocity,self.velocity)
self.x+=self.axis1*self.directionX + self.axis2*self.directionY
self.y+=self.axis1*self.directionY - self.axis2*self.directionX
if self.out_of_range():
remove_widget(self)
black_hole={
"damage":10,
"cooldown": 30,
"velocity": 10,
"image_source": "atlas://images/Master484/my_pellet",
"pattern": Random
}
class Quick(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Quick,self).__init__(source=attack["image_source"],pos=pos)
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.velocity=attack["velocity"]
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
self.axis1=0
self.axis2=0
self.angle=atan2(directionY,directionX)*180/pi
def update(self):
self.axis1=self.velocity
self.x+=self.axis1*self.directionX + self.axis2*self.directionY
self.y+=self.axis1*self.directionY - self.axis2*self.directionX
if self.out_of_range():
            self.parent.remove_widget(self)
fire_bullet={
"pattern": Quick,
"image_source": "atlas://images/Master484/quick_bullet",
"damage": 1,
"cooldown": 5,
"velocity": 20
}
class Gravity(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Gravity,self).__init__(source=attack["image_source"],pos=pos)
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.velocity=attack["velocity"]
self.poison=attack["poison"]
self.axis1=0
self.axis2=0
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
self.velocity_y=directionY*attack["velocity_y"]
self.gravity=gravity
def update(self):
self.x+=self.velocity*self.directionX
self.velocity_y+=self.gravity
self.y+=self.velocity_y
if self.out_of_range():
            self.parent.remove_widget(self)
venom_ball={
"damage": 5,
"velocity": 10,
"velocity_y": 10,
"cooldown": 60,
"image_source": 'atlas://images/Master484/snake',
"pattern": Gravity,
"poison":1
}
class Periodic(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Periodic,self).__init__(source=attack["image_source"],pos=pos)
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.velocity=attack["velocity"]
self.amplitude=attack["amplitude"]
self.frequency=attack["frequency"]
self.axis1=0
self.axis2=0
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
self.initial_position=pos
self.offset=randint(0,180)
self.t=0.0
def update(self):
self.axis1=self.t*self.velocity
self.axis2=self.amplitude*(sin( (self.t+self.offset)/self.frequency)-sin(self.offset/self.frequency))
self.x=self.initial_position[0]+self.axis1*self.directionX + self.axis2*self.directionY
self.y=self.initial_position[1]+self.axis1*self.directionY - self.axis2*self.directionX
self.t+=1.0
if self.out_of_range():
            self.parent.remove_widget(self)
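# Note on Periodic.update() above (explanatory comment, not part of the original game code):
# subtracting sin(self.offset/self.frequency) anchors the lateral offset at 0 for t == 0, so each
# bullet starts exactly at its spawn point and then oscillates around the firing direction with
# the configured amplitude and frequency; the random offset only de-synchronizes the phase of
# different bullets.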
leaf_storm={
"pattern": Periodic,
"image_source": "atlas://images/Master484/green_shuriken",
"damage": 2,
"cooldown": 10,
"velocity": Window.width*0.005,
"amplitude": Window.height*0.1,
"frequency": 5
}
whirlpool={
"pattern": Periodic,
"image_source": "atlas://images/Master484/bubble",
"damage": 12,
"cooldown": 30,
"velocity": Window.width*0.004,
"amplitude": Window.height*0.2,
"frequency": 30
}
fire_storm={
"pattern": Periodic,
"image_source": "atlas://images/Master484/red_bomb",
"damage": 1,
"cooldown": 10,
"velocity": Window.width*0.007,
"amplitude": Window.height*0.1,
"frequency": 5
}
class Fire(Bullet):
def __init__(self,pos,attack, directionX, directionY):
super(Fire, self).__init__(source=attack["image_source"],pos=pos)
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.initial_position=pos
self.angle=randint(-attack["angle"],attack["angle"])
self.velocity=attack["velocity"]
self.min_opacity=attack["opacity"]
self.duration=attack["duration"]
self.constant=attack["constant"]
self.t=0
self.axis1=0
self.axis2=0
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
def update(self):
self.t+=1
self.axis1=self.velocity*cos(2*pi*self.angle/360)
self.axis2=self.velocity*sin(2*pi*self.angle/360)*self.constant**self.t
self.x+=self.axis1*self.directionX + self.axis2*self.directionY
self.y+=self.axis1*self.directionY - self.axis2*self.directionX
self.opacity-=(1-self.min_opacity)/self.duration
if self.t>self.duration or self.out_of_range():
            self.parent.remove_widget(self)
fire_breath={
"pattern": Fire,
"image_source": "atlas://images/Master484/red_bomb",
"damage": 1,
"cooldown": 4,
"angle": 30,
"velocity": 10,
"duration": 16,
"constant":0.99,
"opacity": 0.1
}
bubbles={
"pattern": Fire,
"image_source": "atlas://images/Master484/bubble",
"damage": 10,
"cooldown": 12,
"angle": 60,
"velocity": 6,
"duration": 50,
"constant":0.9,
"opacity": 0.5
}
lightning={
"pattern": Fire,
"image_source": "atlas://images/Master484/yellow_rod1",
"damage": 1,
"cooldown": 2,
"angle": 45,
"velocity": 25,
"duration": 7,
"constant":1,
"opacity": 1
}
class Mirror(Bullet):
def __init__(self, pos, attack, directionX, directionY):
super(Mirror, self).__init__(source=attack["image_source"],pos=pos)
self.size=(attack["width"],attack["height"])
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.directionX=directionX/sqrt(directionX**2+directionY**2)
self.directionY=directionY/sqrt(directionX**2+directionY**2)
self.axis1=0
self.axis2=0
self.x_velocity=attack["velocity"]
def hit(self,enemy):
return enemy.collide_point(self.center_x, self.center_y)
def update(self):
self.axis1=self.x_velocity
self.y=self.parent.y+shots_y_offset
self.x+=self.axis1*self.directionX
if self.out_of_range():
            self.parent.remove_widget(self)
sync_shot={
"pattern": Mirror,
"image_source": "atlas://images/Master484/mirror_bullet",
"width": 50,
"height": 40,
"damage": 5,
"cooldown": 150,
"velocity": 5
}
class Homing(Bullet):
def __init__(self,pos,attack, directionX, directionY):
super(Homing, self).__init__(source=attack["image_source"],pos=pos)
self.damage=attack["damage"]
self.cooldown=attack["cooldown"]
self.velocity=attack["velocity"]
def update(self):
enemy=self.parent.enemies[0]
dx=enemy.center_x-self.x
dy=enemy.center_y-self.y
distance=sqrt(dx**2+dy**2)
self.x+=self.velocity*dx/distance
self.y+=self.velocity*dy/distance
flamenco={
"image_source": "atlas://images/Master484/pink_shuriken",
"pattern": Homing,
"damage": 2,
"cooldown": 50,
"velocity": 20
}
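# Illustrative sketch (added commentary, not part of the original game code): every update()
# above maps a local displacement (axis1 = along the firing direction, axis2 = sideways) into
# world coordinates through the normalized direction vector. The helper name below is made up
# purely for illustration.
def _direction_frame_step_sketch(x, y, axis1, axis2, directionX, directionY):
    """Return the new (x, y) after moving axis1 forward and axis2 laterally."""
    new_x = x + axis1*directionX + axis2*directionY
    new_y = y + axis1*directionY - axis2*directionX
    return new_x, new_y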
| 28.948509
| 103
| 0.732353
|
47e3ec2718402cb4fcb15aed8571af4a87e5fcc8
| 157
|
py
|
Python
|
examples/example_package/__init__.py
|
zthurman/yalow
|
144ccc54b59b8a6c0567d713ba51d9a26ce75085
|
[
"Apache-2.0"
] | null | null | null |
examples/example_package/__init__.py
|
zthurman/yalow
|
144ccc54b59b8a6c0567d713ba51d9a26ce75085
|
[
"Apache-2.0"
] | 2
|
2020-03-14T02:45:45.000Z
|
2020-03-14T03:53:21.000Z
|
examples/example_package/__init__.py
|
zthurman/yalow
|
144ccc54b59b8a6c0567d713ba51d9a26ce75085
|
[
"Apache-2.0"
] | null | null | null |
import logging
module_logger = logging.getLogger('logalog.example_package')
def super_mega_ukulele():
module_logger.error(f'And its minion is HUGE!')
| 19.625
| 60
| 0.77707
|
4dcaa1bd019cbf0cdf083f15bbf14632160c5df1
| 63,444
|
py
|
Python
|
fibers.py
|
lm2-poly/OpenFiberSeg
|
0dbe27158a13b0ef9247e7353d3b2e651a545852
|
[
"MIT"
] | 1
|
2022-03-19T17:14:04.000Z
|
2022-03-19T17:14:04.000Z
|
fibers.py
|
lm2-poly/OpenFiberSeg
|
0dbe27158a13b0ef9247e7353d3b2e651a545852
|
[
"MIT"
] | null | null | null |
fibers.py
|
lm2-poly/OpenFiberSeg
|
0dbe27158a13b0ef9247e7353d3b2e651a545852
|
[
"MIT"
] | null | null | null |
# by Facundo Sosa-Rey, 2021. MIT license
import numpy as np
import scipy
from trackingFunctions import knn,knn3D
import time
import multiprocessing
class trackedCenterPointsObj:
def __init__(self,nSlices,offset):
#points are indexed by imSlice
self.points={}
for iSlice in range(offset,nSlices+offset):
self.points[iSlice]={}
self.points[iSlice]["x"]=[]
self.points[iSlice]["y"]=[]
self.points[iSlice]["fiberID"]=[]
def append(self,iSlice,x,y,fiberID):
iSlice=int(iSlice)
if fiberID not in self.points[iSlice]["fiberID"]:
self.points[iSlice]["x"].append(float(x))
self.points[iSlice]["y"].append(float(y))
self.points[iSlice]["fiberID"].append(fiberID)
if len(self.points[iSlice]["x"])!=len(self.points[iSlice]["fiberID"]):
raise ValueError("inconsistent number of elements in points vs fiberIDs")
def reject(self,targetPointsObj,iSlice,x,y,fiberID):
iSlice=int(iSlice)
if fiberID in self.points[iSlice]["fiberID"]:
if self.points[iSlice]["fiberID"].count(fiberID)>1:
raise ValueError("same fiberID occurs more that once in a single imSlice, should never happen")
indexToPop=self.points[iSlice]["fiberID"].index(fiberID)
self.points[iSlice]["x"].pop(indexToPop)
self.points[iSlice]["y"].pop(indexToPop)
self.points[iSlice]["fiberID"].pop(indexToPop)
targetPointsObj.append(iSlice,x,y,fiberID)
def transferID(self,iSlice,oldID,newID):
self.points[int(iSlice)]["fiberID"]=\
[newID if fibID==oldID else fibID for fibID in self.points[iSlice]["fiberID"]]
if self.points[int(iSlice)]["fiberID"].count(newID)>2:
raise RuntimeError("same fiberID occurs more than once, should never happen")
def getPoints(self):
return self.points
def getPointsInSlice(self,imSlice):
        #needs to return a list of numpy arrays [x, y]
pointsObj=self.points[imSlice]
points=[]
fiberIDs=[]
for iCt in range(len(pointsObj["x"])):
points.append(
np.array(
[ pointsObj["x"][iCt],pointsObj["y"][iCt] ]))
fiberIDs.append(
pointsObj["fiberID"][iCt]
)
return np.array(points),fiberIDs
def centerPoint_to_tuple():
pass
class LUT_fiberID_to_centerPointObj:
# this LUT needs a custom class so as to make the same call to append()
# if the fiberObj is new, makes a new list of centerPoints
    # if the fiberObj exists, adds the centerPoint to the existing list
def __init__(self):
self.LUT={}
def append(self,fiberID,centerPointTuple):
if fiberID in self.LUT.keys():
self.LUT[fiberID].append(centerPointTuple)
else:
self.LUT[fiberID]=[centerPointTuple]
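# Hedged usage sketch for LUT_fiberID_to_centerPointObj (illustrative values, not in the original code):
#     lut = LUT_fiberID_to_centerPointObj()
#     lut.append(3, (10, 42))   # fiberID 3 is new     -> lut.LUT == {3: [(10, 42)]}
#     lut.append(3, (11, 57))   # fiberID 3 exists     -> lut.LUT == {3: [(10, 42), (11, 57)]}
# i.e. the same append() call works whether or not the fiberID has been seen before.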
class fiberObj:
@classmethod
def initializeClassAttributes(cls,savedAttributes=None):
        # Putting all class attributes in a dict makes for a lighter printout of fiberObj in an IDE
        # (for debugging, the class attributes are often not needed)
if savedAttributes is None:
cls.classAttributes={
"exclusiveZone" :[],
"legendLabels" :{"basic"},
"interpolatedCenters" :{},
"backTracking" :{},
"colors":{
# color by type of fiber
"basic" :(0.20 ,0.20 ,1.0 ), # dark blue
"stitched_blind(extended)" :(1. ,0.1 ,1. ), # pink
"stitched_blind(added)" :(0.14 ,0.9 ,0.14 ), # green
"stitched_smart(extended)" :(0.47 ,0.04 ,0.14 ), # burgundy
"stitched_smart(added)" :(0.55 ,0.76 ,1.00 ), # cyan
"stitched(initial)" :(1. ,1. ,0. ), # yellow
"too short" :(0.8 ,0.8 ,0.8 ), # grey #if string key is changed, modify in getLegendLabel as well
"too steep" :(0.9 ,0.4 ,0.4 ), # orange
"backTracking" :(0.65 ,0.04 ,1. ), # violet
# colors for all permutations combined
"permuted123" :(0. ,0.46 ,0.69 ), # cerulean Blue
"permuted132" :(0. ,0.50 ,0. ), # India Green
"permuted321" :(1. ,0.50 ,0. ) # orange
},
"LUT_fiberID_to_color" :{},
"listFiberIDs_tracked" :set([])
}
else:
# load from file
cls.classAttributes=savedAttributes
@classmethod
def setExclusiveZone(cls,bounds):
cls.classAttributes["exclusiveZone"]=bounds
@classmethod
def getExclusiveZone(cls):
return cls.classAttributes["exclusiveZone"]
@classmethod
def initLUTs(cls):
cls.classAttributes["LUT_centerPoint_to_fiberID"] ={}
cls.classAttributes["LUT_fiberID_to_centerPoint"] =LUT_fiberID_to_centerPointObj()
cls.classAttributes["LUT_fiberID_startCenterPoint"] ={}
cls.classAttributes["LUT_fiberID_endCenterPoint"] ={}
@classmethod
def clearLUTs(cls):
del cls.classAttributes["LUT_centerPoint_to_fiberID"]
del cls.classAttributes["LUT_fiberID_to_centerPoint"]
del cls.classAttributes["LUT_fiberID_startCenterPoint"]
del cls.classAttributes["LUT_fiberID_endCenterPoint"]
@classmethod
def initializeFromLUT(cls,centerPoints,listSlicesLUT,LUT_id_bottom,LUT_id_top,offset):
if len(LUT_id_bottom)!=len(LUT_id_top):
raise ValueError("look up tables of incompatible size")
cls.initLUTs()
# initialize the trackedCenterPoints object for all slices in volume
cls.initTrackedCenterPoints(nSlices=len(LUT_id_bottom)+1,offset=offset)
fiberStruct={}
for i,imSlice in enumerate(listSlicesLUT):
if i==0:
for iCt in range(len(LUT_id_bottom[0])):
#first pass of creating fiberObjs, hashed by fiberID, which here correspond to the index of startPnt.
fiberStruct[iCt]=fiberObj(
iCt, # fiberID
centerPoints[imSlice][LUT_id_bottom[0][iCt]][0], # xcoord
                        centerPoints[imSlice][LUT_id_bottom[0][iCt]][1], imSlice) # ycoord, zcoord+offset for exclusive zone with zMin>0
# imSlice==0 for bottom, imSlice==1 for top
centerPointBottomTuple= (imSlice, LUT_id_bottom[0][iCt])
centerPointTopTuple = (imSlice+1, LUT_id_top [0][iCt])
cls.classAttributes["LUT_centerPoint_to_fiberID"][centerPointTopTuple]=iCt
cls.classAttributes["LUT_fiberID_to_centerPoint"].append(iCt,centerPointTopTuple)
cls.classAttributes["LUT_fiberID_startCenterPoint"][iCt]=centerPointBottomTuple
cls.classAttributes["LUT_fiberID_endCenterPoint"][iCt] =centerPointTopTuple
else:
for iCt in range(len(LUT_id_bottom[i])):
centerPointBottomTuple = (imSlice ,LUT_id_bottom [i][iCt])
centerPointTopTuple = (imSlice+1,LUT_id_top [i][iCt])
temp=centerPoints[imSlice][LUT_id_bottom[i][iCt]]
xFloat=float(temp[0])
yFloat=float(temp[1])
if centerPointBottomTuple in cls.classAttributes["LUT_centerPoint_to_fiberID"].keys():
# add to existant fiberObj:
iFib=cls.classAttributes["LUT_centerPoint_to_fiberID"][centerPointBottomTuple]
cls.classAttributes["LUT_centerPoint_to_fiberID"][centerPointTopTuple]=iFib
cls.classAttributes["LUT_fiberID_to_centerPoint"].append(iFib,centerPointTopTuple)
# point to new endCenterPoint
cls.classAttributes["LUT_fiberID_endCenterPoint"][iFib] =centerPointTopTuple
fiberStruct[iFib].append(xFloat,yFloat,imSlice)
else:
#create new fiberObj
fiberIDnew=len(fiberStruct)
# fNum xcoord ycoord zcoord
fiberStruct[fiberIDnew]=fiberObj(fiberIDnew, xFloat, yFloat, imSlice)
cls.classAttributes["LUT_centerPoint_to_fiberID"][centerPointTopTuple]=fiberIDnew
cls.classAttributes["LUT_fiberID_to_centerPoint"].append(fiberIDnew,centerPointTopTuple)
cls.classAttributes["LUT_fiberID_startCenterPoint"][fiberIDnew]=centerPointBottomTuple
cls.classAttributes["LUT_fiberID_endCenterPoint"] [fiberIDnew] =centerPointTopTuple
listFibers,startPnts,endPnts=cls.getAllEndPoints(centerPoints,fiberStruct)
# add endPnts to fiber objects (fibers were created by adding bottom centerPoints only, need to add last point on top )
for i,fib in fiberStruct.items():
fib.append(*endPnts[i])
#LUT are no longer needed, delete
cls.clearLUTs()
return fiberStruct
@classmethod
def getAllEndPoints(cls,centerPoints,fiberStruct):
startPnts =[]
endPnts =[]
cls.classAttributes["listFiberID"]=np.zeros(len(fiberStruct),np.int32)
for i,iFibObj in fiberStruct.items():
fibID=iFibObj.fiberID
cls.classAttributes["listFiberID"][i]=fibID
startCenterPointTuple=cls.classAttributes["LUT_fiberID_startCenterPoint"][fibID]
endCenterPointTuple =cls.classAttributes["LUT_fiberID_endCenterPoint"] [fibID]
z=int(startCenterPointTuple[0])
xy=centerPoints[z][startCenterPointTuple[1]]
x=int(xy[0])
y=int(xy[1])
startPnts.append(np.array([x,y,z]))
z=int(endCenterPointTuple[0])
xy=centerPoints[z][endCenterPointTuple[1]]
x=int(xy[0])
y=int(xy[1])
endPnts.append(np.array([x,y,z]))
cls.classAttributes["startPnts"]=np.array(startPnts)
cls.classAttributes["endPnts"] =np.array(endPnts)
return cls.classAttributes["listFiberID"],cls.classAttributes["startPnts"],cls.classAttributes["endPnts"]
@classmethod
def blindStitching(cls,fiberStruct,blindStitchingMaxDistance,
blindStitchingMaxLateralDist,verboseHandle):
from trackingFunctions import knn3D
### 3D Knn on endpoints only
print("\t\n##############################\n")
print("\t3D simultaneous matching\n\n")
# create KDTree with all top centerPoints
knnObj=knn3D(cls.classAttributes["startPnts"])
id_bottom,id_top,repDict,rejectPositions=knnObj.query(
cls.classAttributes["endPnts"],blindStitchingMaxDistance)
# the points that are matched correctly (not a fiber to itself)
# are checked for backtracking and lateral distance
testSelfMatch=id_bottom==id_top
mask_GoodMatches3D=list(np.where(~testSelfMatch)[0])
# if any of the non self-matched indices have repetitions, delete them
rejectPositions.sort(reverse=True)
for i in rejectPositions:
if i in mask_GoodMatches3D:
delPos=mask_GoodMatches3D.index(i)
del mask_GoodMatches3D[delPos]
#test to see if more than one match in top
l_top=list(id_top[mask_GoodMatches3D])
repetitions=[(index,val) for index,val in enumerate(id_top[mask_GoodMatches3D]) if l_top.count(val)>1]
if len(repetitions)>0:
raise RuntimeError("there are multiple matches left after deletions, should never happen")
stitchingChains={}
stitchingChains_endFib={}
intermediarySegments=set([])
# to deal with multiple stitching (chains of fiberObj), a stitchingChain object is
# created, indexed by the fiber_ID of the tip of the chain (fiberObj with highest z)
        # if a new stitch is required from this chain tip, the chain is popped, and then added
# to a new chain with the new tip. This way, if a chain is D(top)->C->B->A(bottom),
# the object A receives the points from the combined object (DCB), not B alone,
# when extend() is performed. A is the only one considered "real".
# the other ones are kept for plotting (diagnostic) purposes
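        # Worked illustration (assumed fiberIDs, not from real data): if the current chain is
        #     stitchingChains == {C: [B, A]}        # C is the tip (highest z)
        # and a new link stitches D (top) onto C (bottom), the chain is re-keyed as
        #     stitchingChains == {D: [C, B, A]}     # pop the old tip C, prepend it, key on D
        # so that A, the bottom-most ("real") segment, eventually receives the combined D+C+B points.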
if len(id_bottom)>0:
#match found
for index,iBottom in enumerate(id_bottom[mask_GoodMatches3D]):
iTop=id_top[mask_GoodMatches3D[index]]
singleEndPnt=cls.classAttributes["endPnts"][iBottom]
singleStartPnt=cls.classAttributes["startPnts"][iTop]
#check if backtracking occurs:
if singleEndPnt[2]<singleStartPnt[2]:
#check if lateral distance is below threshold:
lateralDist=np.sqrt(
(singleEndPnt[0]-singleStartPnt[0])**2+\
(singleEndPnt[1]-singleStartPnt[1])**2)
if lateralDist<blindStitchingMaxLateralDist:
#create stitchingChains for fiberObj passing both tests
fiberID_end =cls.classAttributes["listFiberID"][iBottom]
fiberID_start=cls.classAttributes["listFiberID"][iTop]
if fiberID_end in stitchingChains.keys():
tempList=stitchingChains.pop(fiberID_end)
intermediarySegments.add(fiberID_end)
tempList=[fiberID_end]+tempList
stitchingChains[fiberID_start]=tempList
stitchingChains_endFib[tempList[-1]]=fiberID_start
else:
stitchingChains[fiberID_start]=[fiberID_end]
stitchingChains_endFib[fiberID_end]=fiberID_start
if verboseHandle:
print("endPnt (bottom):", singleEndPnt)
print("startPnt (top) :", singleStartPnt)
print("lateralDist",lateralDist)
print("####################")
endPnts =list(cls.classAttributes["endPnts"])
listFiberID_end =list(cls.classAttributes["listFiberID"])
startPnts =list(cls.classAttributes["startPnts"])
listFiberID_start=list(cls.classAttributes["listFiberID"])
print("\t3D simultaneous matching resulted in {} successful matches out of {} fiberObjs\n".\
format(len(stitchingChains),len(fiberStruct)))
        #delete points that were correctly matched (not self-matched) on the first pass of knn3D()
        #deleting in decreasing order of indices won't affect positions below the deletion
for ele in sorted(id_bottom[mask_GoodMatches3D], reverse = True):
del endPnts[ele]
del listFiberID_end[ele]
for ele in sorted(id_top[mask_GoodMatches3D], reverse = True):
del startPnts[ele]
del listFiberID_start[ele]
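        # (Illustration of the reverse-order deletion above, with assumed indices: removing
        #  positions [7, 3, 1] in decreasing order never shifts a not-yet-deleted position,
        #  whereas deleting [1, 3, 7] in increasing order would shift the list and remove the
        #  wrong elements.)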
endPnts =np.array(endPnts)
startPnts=np.array(startPnts)
listFiberID_end =np.array(listFiberID_end)
listFiberID_start=np.array(listFiberID_start)
print("\n\t##############################")
print("\n\tsequential matching started")
# create KDTree only with centerPoints that were self-matched
knnObjSelfMatched=knn3D(startPnts)
# Query by specifying k=5 nearest neighbors, which signals to check for self matching,
# and returns closest match that is not self-matching
id_bottom,id_top,matches=knnObjSelfMatched.query(
endPnts,
blindStitchingMaxDistance,
blindStitchingMaxLateralDist,
k=5)
if len(id_bottom)>0:
for i in range(len(id_bottom)):
fiberID_start=listFiberID_start[id_top [i]]
fiberID_end =listFiberID_end [id_bottom[i]]
if fiberID_start in intermediarySegments or fiberID_end in intermediarySegments:
print("Skipping this match: branching of stitching chains. incoherent matching of neighbors: {}->{}".\
format(fiberID_start,fiberID_end))
else:
if fiberID_start in stitchingChains_endFib.keys():
if fiberID_end in stitchingChains.keys():
#case of new link being between two existing stitchingChains
existingStart=stitchingChains_endFib[fiberID_start]
tempListTop=stitchingChains.pop(existingStart)
tempListBottom=stitchingChains.pop(fiberID_end)
tempList=tempListTop+[fiberID_end]+tempListBottom
stitchingChains[existingStart]=tempList
stitchingChains_endFib.pop(fiberID_start)
stitchingChains_endFib[tempList[-1]]=existingStart
for fibID in tempList[:-1]:
intermediarySegments.add(fibID)
else:
# new link at bottom of existing stitchingChain
existingStart=stitchingChains_endFib[fiberID_start]
tempList=stitchingChains.pop(existingStart)
for fibID in tempList:
intermediarySegments.add(fibID)
tempList=tempList+[fiberID_end]
stitchingChains[existingStart]=tempList
stitchingChains_endFib.pop(fiberID_start)
stitchingChains_endFib[fiberID_end]=existingStart
elif fiberID_end in stitchingChains.keys():
# new link at top of existing stitchingChain
tempList=stitchingChains.pop(fiberID_end)
tempList=[fiberID_end]+tempList
stitchingChains[fiberID_start]=tempList
stitchingChains_endFib[tempList[-1]]=fiberID_start
else:
# start new stitchingChain
stitchingChains[fiberID_start]=[fiberID_end]
stitchingChains_endFib[fiberID_end]=fiberID_start
if verboseHandle:
lateralDist=matches[id_bottom[i]][1][1]
distanceTotal= matches[id_bottom[i]][1][0]
print("endPnt (bottom):", endPnts [id_bottom[i]], "no: ", id_bottom[i])
print("startPnt (top) :", startPnts [id_top [i]], "no: ", id_top [i])
print( "endID:",fiberID_end,"startID:", fiberID_start)
print("lateralDist",lateralDist)
print("distanceTotal",distanceTotal)
print("####################")
stitchedListCache_fiberID =set([])
# combine fiberObj in stitchingChains
if len(stitchingChains)>0:
# check total number of possible stitches, output to console
numStitchesTotal=0
for chainLinks in stitchingChains.values():
numStitchesTotal+=len(chainLinks)
numStitchesAttempted=0
for chainEnd in stitchingChains.keys():
extension_fiberID=chainEnd
#extend by starting at end of chain, working all the way to the front.
chainLinks=stitchingChains[chainEnd]
while chainLinks:
segmentToExtend_fiberID=chainLinks.pop(0)
numStitchesAttempted+=1
extendSuccessful=\
fiberStruct[segmentToExtend_fiberID].extendFiber(
fiberStruct[extension_fiberID],
(segmentToExtend_fiberID,
extension_fiberID),
stitchingType="blind",
checkIfInSegt=False
)
if extendSuccessful:
#keep indices in list so smart stitching is not attempted on fictitious segments
# (the "extensions" fiberObj are kept in fiberStruc for plotting purposes)
stitchedListCache_fiberID .add(extension_fiberID)
extension_fiberID=segmentToExtend_fiberID
print("BlindStitching: attempted/total: {}/{}".format(numStitchesAttempted,numStitchesTotal))
print("\ttotal number of successful stitches in blindStitching(): {} out of {} fiberObjs\n".\
format(len(stitchedListCache_fiberID),len(fiberStruct)))
return stitchedListCache_fiberID
@staticmethod
def smartStitching(fiberStructMain,
smartStitchingMinFibLength,
smartStitchingMaxDistance,
smartStitchingMaxLateralDist,
smartStitchingAlignAngle,
smartStitchingBackTrackingLimit,
processingMinFiberLength,
tagAngleTooSteep,
maxSteepnessAngle,
verboseHandle=False,
checkIfInSegt=True,
createNewPoints=True,
stitchingType="smart",
preventSelfStitch=False
):
tic=time.perf_counter()
endPnts={}
startPnts={}
orientationVec={}
coordinateTuplesEnd=[]
coordinateTuplesStart=[]
fiberID_list=[]
lengths=[]
if preventSelfStitch:
# in "smart_transposed" or "smart_lastPass":
# only fibers from different permutations are allowed to be stitched at this stage
in123_bottom=[]
in123_top=[]
in132_bottom=[]
in132_top=[]
in321_bottom=[]
in321_top=[]
suffixCheck=(
in123_bottom,
in123_top,
in132_bottom,
in132_top,
in321_bottom,
in321_top
)
else:
in123_bottom=in123_top=in132_bottom=in132_top=in321_bottom=in321_top=None
suffixCheck=None
for fibID,fObj in fiberStructMain.items():
#gather data for ranking matches in smartStitching()
if fObj.totalLength>smartStitchingMinFibLength:
endPnts [fibID]=fObj.endPnt
startPnts [fibID]=fObj.startPnt
orientationVec[fibID]=fObj.orientationVec.copy()/np.linalg.norm(fObj.orientationVec)
lengths.append(fObj.totalLength)
coordinateTuplesEnd .append((*endPnts [fibID], *orientationVec[fibID]))
coordinateTuplesStart.append((*startPnts[fibID], *orientationVec[fibID]))
fiberID_list.append(fibID)
if preventSelfStitch:
in123_bottom.append(fObj.suffix==0.123)
in123_top .append(fObj.suffix==0.123)
in132_bottom.append(fObj.suffix==0.132)
in132_top .append(fObj.suffix==0.132)
in321_bottom.append(fObj.suffix==0.321)
in321_top .append(fObj.suffix==0.321)
fiberStructExtended={}
# create kdTree on terminal points of main fiberObjs only
if not coordinateTuplesStart:
return fiberStructExtended,{"6-D knn search only":None }
knnObj6D=knn3D(coordinateTuplesStart)
id_bottom,id_top,matches=knnObj6D.query(
coordinateTuplesEnd,
smartStitchingMaxDistance,
smartStitchingMaxLateralDist,
smartStitchingAlignAngle,
smartStitchingBackTrackingLimit,
lengths=lengths,
k=20,#number of considered neighbors
suffixCheck=suffixCheck
)
times_tracking={"6-D knn search only":time.strftime("%Hh%Mm%Ss", time.gmtime(time.perf_counter()-tic)) }
print("6-D knn search performed in: {: >0.4f}s".format(time.perf_counter()-tic))
for i in range(len(id_bottom)):
fibID_end =fiberID_list[id_bottom[i]]
fibID_start=fiberID_list[ id_top[i]]
if verboseHandle:
lateralDistTest=matches[id_bottom[i]][1][1]
distanceTotal= matches[id_bottom[i]][1][0]
angle=np.degrees(np.arccos(matches[id_bottom[i]][1][3]))
print("bottom: \t{: >4} top: \t{: >4}, \tdistanceTotal: {: >5.4f}, \tdistLateral: {: >5.4f}, \tangle: {: >5.4f}".format(
fiberStructMain[fibID_end ].fiberID,
fiberStructMain[fibID_start].fiberID,
distanceTotal,
lateralDistTest,
angle)
)
# attempt to combine fiber objects into the initial segment
if fiberStructMain[fibID_end].addedTo:
# if this fiber already has been stitched to another -> point to actual initial segment
fibID_temp=fibID_end
fibIDcache={fibID_temp}
while fiberStructMain[fibID_temp].addedTo:
fibID_temp=fiberStructMain[fibID_temp].addedTo[0]
if fibID_temp in fibIDcache:
raise RuntimeError("circular \"addedTo\" loop")
fibIDcache.add(fibID_temp)
mainFiberID=fibID_temp
addInitial=False
else:
# this is the first extension of this fiber:
# create a new fiber for the purpose of plotting
# the starting fiberStruc before extension
fiberStructTemp=fiberStructMain[fibID_end].copy()
addInitial=True
mainFiberID=fibID_end
if stitchingType =="smart":
suffix=None
colorInitial ="stitched(initial)"
tagInitial ="initial_stitched_segment"
tagExtended ="stitched_smart(extended)"
doTrimming =True
elif stitchingType =="smart_transposed":
suffix=mainFiberID%1
colorInitial ="stitched(initial)_transposed"
tagInitial ="initial_stitched_segment_transposed"
tagExtended ="stitched_smart(extended)_transposed"
doTrimming =False
elif stitchingType =="smart_lastPass":
suffix=mainFiberID%1
colorInitial ="stitched(initial)_lastPass"
tagInitial ="initial_stitched_segment_lastPass"
tagExtended ="stitched_smart(extended)_lastPass"
doTrimming =False
else:
raise ValueError("not implemented for stitchingType: {}".format(stitchingType))
if fiberStructMain[fibID_start].addedTo:
raise RuntimeError("attempting to add to a fiber when already part of one stitching chain")
extendSuccessful=fiberStructMain[mainFiberID].extendFiber(
fiberStructMain[fibID_start],
(mainFiberID,fibID_start),
stitchingType,
checkIfInSegt=checkIfInSegt,
createNewPoints=createNewPoints,
suffix=suffix,
fibersAll=fiberStructMain,
doTrimming=doTrimming
)
if extendSuccessful:#keep extension if majority of new centerPoints are in region segmented as fiber
if mainFiberID==fibID_end:
print("\tSmart stitching of fiberID:{} (bottom) extended by {} (top)".\
format(mainFiberID,fibID_start) )
else:
print("\tSmart stitching of fiberID:{} (bottom) extended by {} (top), via intermediary fiber {}".\
format(mainFiberID,fibID_start,fibID_end) )
if addInitial:#if this is the first extension of this fiber
#create a new fiber for the sole purpose of seeing the starting fiberStruc before extension
if fiberStructExtended:
nextFiberID=max(fiberStructExtended.keys())+1
else:
nextFiberID=max(fiberStructMain.keys())+1
fiberStructExtended[nextFiberID]=fiberStructTemp
fiberStructExtended[nextFiberID].setColor(colorInitial)
fiberStructExtended[nextFiberID].tags.add(tagInitial)
fiberStructExtended[nextFiberID].originalFiberID=fibID_end
if tagExtended in fiberStructExtended[nextFiberID].tags:
raise ValueError("fiber branching")
fiberObj.addLegendLabel(colorInitial)
#pointer towards initial segment, for plotting purposes
fiberStructMain[mainFiberID].initialObj={nextFiberID:fiberStructExtended[nextFiberID]}
fiberStructMain[mainFiberID].processPointCloudToFiberObj(
processingMinFiberLength,
tagAngleTooSteep,
maxSteepnessAngle,
doTrimming=doTrimming
)
if len(fiberStructMain[fibID_end].addedTo)>1:
#print("Segment added to more than one fiber (causes branching)")
raise RuntimeError("Segment added to more than one fiber (branching in reverse, converging)")
return fiberStructExtended,times_tracking
@classmethod
def getPointsInSlice(cls,iSlice):
return cls.classAttributes["trackedCenterPoints"].getPointsInSlice(iSlice)
@classmethod
def initTrackedCenterPoints(cls,nSlices,offset):
cls.classAttributes["trackedCenterPoints" ]=trackedCenterPointsObj(nSlices,offset)
cls.classAttributes["rejectedCenterPoints"]=trackedCenterPointsObj(nSlices,offset)
cls.classAttributes["trimmedCenterPoints" ]=trackedCenterPointsObj(nSlices,offset)
@classmethod
def getTrackedCenterPoints(cls):
return cls.classAttributes["trackedCenterPoints"].getPoints()
@classmethod
def getRejectedCenterPoints(cls):
return cls.classAttributes["rejectedCenterPoints"].getPoints()
@classmethod
def appendTrackedCenterPoints(cls,iSlice,x,y,fiberID,rejected):
if rejected:
cls.classAttributes["rejectedCenterPoints"].append(iSlice,x,y,fiberID)
else:
cls.classAttributes["trackedCenterPoints"] .append(iSlice,x,y,fiberID)
@classmethod
def rejectTrackedCenterPoints(cls,iSlice,x,y,fiberID,rejected,trimming):
if trimming:
if rejected:
# source # target
cls.classAttributes["rejectedCenterPoints"].reject(cls.classAttributes["trimmedCenterPoints"],iSlice,x,y,fiberID)
else: # source # target
cls.classAttributes["trackedCenterPoints" ].reject(cls.classAttributes["trimmedCenterPoints"],iSlice,x,y,fiberID)
else: # source # target
cls.classAttributes["trackedCenterPoints"].reject(cls.classAttributes["rejectedCenterPoints"],iSlice,x,y,fiberID)
@classmethod
def restoreRejectedPoints(cls,iSlice,x,y,fiberID):
cls.classAttributes["rejectedCenterPoints"].reject(cls.classAttributes["trackedCenterPoints"],iSlice,x,y,fiberID)
@classmethod
def transferID(cls,iSlice,oldID,newID,rejected):
if rejected:
cls.classAttributes["rejectedCenterPoints"].transferID(iSlice,oldID,newID)
else:
cls.classAttributes["trackedCenterPoints"] .transferID(iSlice,oldID,newID)
@classmethod
def getColor(cls,label):
return cls.classAttributes["colors"][label]
@classmethod
def addLegendLabel(cls,label):
cls.classAttributes["legendLabels"].add(label)
@classmethod
def getLegendLabels(cls,plotRejectedFibers):
legendLabels=cls.classAttributes["legendLabels"].copy()
if not plotRejectedFibers:
if "too steep" in legendLabels:
legendLabels.remove("too steep")
if "too short" in legendLabels:
legendLabels.remove("too short")
return legendLabels
@classmethod
def removeLegendLabels(cls,*labels):
for label in labels:
if label in cls.classAttributes["legendLabels"]:
cls.classAttributes["legendLabels"].remove(label)
@classmethod
def loadSegmentationMask(cls,V_fibers):
cls.classAttributes["V_fibers"]=V_fibers
@classmethod
def checkIfPointIsInSegt(cls,x,y,z):
return cls.classAttributes["V_fibers"][int(z),int(x),int(y)]==255
@classmethod
def setTrackingParameters(cls,distance,fraction,fillingNumberAlwaysAllowed,maxTrimPoints):
cls.classAttributes["collisionDistance"]=distance
cls.classAttributes["fillingFraction"] =fraction
cls.classAttributes["fillingNumberAlwaysAllowed"] =fillingNumberAlwaysAllowed
cls.classAttributes["maxTrimPoints"] =maxTrimPoints
def __init__(self,fiberID,x=None,y=None,z=None,color="basic",appendTrackedCenterPoint=True):
#appendTrackedCenterPoint==False for fiberObjs kept only for plotting purposes
if x is not None:
self.x=np.array([x],float)
self.y=np.array([y],float)
self.z=np.array([z],float)
else:
self.x=[]
self.y=[]
self.z=[]
self.extendedBy=[]
self.extendedByObj={} #pointers to actual fiberObj, serves to update tags and colors
self.addedTo=[]
self.tags=set([])
self.fiberID=fiberID
self.setColor(color)
self.rejected=False
if x is not None and appendTrackedCenterPoint :
self.appendTrackedCenterPoints(z,float(x),float(y),fiberID,rejected=False)
self.classAttributes["listFiberIDs_tracked"].add(fiberID)
def copy(self):
newFiber=fiberObj(
self.fiberID,
color=self.colorLabel,
appendTrackedCenterPoint=False
)
newFiber.x =self.x.copy()
newFiber.y =self.y.copy()
newFiber.z =self.z.copy()
newFiber.setColor(self.colorLabel)
newFiber.extendedBy =self.extendedBy
newFiber.extendedByObj =self.extendedByObj
newFiber.addedTo =self.addedTo
newFiber.tags =self.tags.copy()
newFiber.meanPntCloud =self.meanPntCloud
newFiber.startPnt =self.startPnt
newFiber.endPnt =self.endPnt
newFiber.totalLength =self.totalLength
newFiber.orientationVec =self.orientationVec
if "originalFiberID" in self.__dir__():
newFiber.originalFiberID=self.originalFiberID
if "suffix" in self.__dir__():
newFiber.suffix=self.suffix
if "trimmedPoints" in self.__dir__():
newFiber.trimmedPoints=self.trimmedPoints
return newFiber
def append(self,x,y,z):
        if np.ndim(z)>0: # z must be a single slice index
raise ValueError("Use extend to append more than one point")
if (z-self.z[-1])>1.: #(case of dumb stitching)
# here the endpoint should not be appended right away, as it will be added at the end of this append.
            # the separate append at the end is necessary for all other appends that are not dumb stitching
# temporary fiberObj, with interpolated points
temp=fiberObj(self.fiberID,x,y,z,appendTrackedCenterPoint=False)
#fill-in with centerPoints between self and temp fiberObj so the watershed transform can be done
#only the interpolated points are appended, not the endpoints
fillingSuccessful,zStart=self.filling(temp)
if fillingSuccessful:
print("\t\tknn stitching \tfiberID: {: 4.0f} \tfrom slices: {: 4.0f} \tto {: 4.0f}".\
format(self.fiberID,int(zStart),z))
self.x=np.append(self.x,x)
self.y=np.append(self.y,y)
self.z=np.append(self.z,z)
self.appendTrackedCenterPoints(z,x,y,self.fiberID,self.rejected)
return
if (z-self.z[-1])==1.:
self.x=np.append(self.x,x)
self.y=np.append(self.y,y)
self.z=np.append(self.z,z)
self.appendTrackedCenterPoints(z,x,y,self.fiberID,self.rejected)
return
def filling(self,otherFiberObj,checkIfInSegt=True,createNewPoints=True,fibersAll=None,doTrimming=True):
zStart =self.z[-1]
zEnd =otherFiberObj.z[0]
if zStart>=zEnd:
if zEnd-self.z[0]<2:
print("self.fiberID={},other.fiberID={},less than 2 pnts would be left after trimming, do not append".\
format(self.fiberID,otherFiberObj.fiberID))
return False,None
if doTrimming:
                #trimming won't work when combining fibers that were transposed, as the z values won't be monotonic
self.trimEndPoints(endPntOther=zEnd)
zStart =self.z[-1]
else:
print("\ncant do trimming() operation between fibers {: >8.3f} and {: >8.3f}, extend without filling\n".\
format(self.fiberID,otherFiberObj.fiberID))
return True,None
zFill =np.linspace(zStart,zEnd,num=int(round(zEnd-zStart+1)),endpoint=True)[1:-1]
if not createNewPoints:
# stitchingType: smart_transposed or smart_lastPass:
            # if any points fall outside of V_fibers==True regions,
            # tag this fiberObj for post-processing: voxels also need to be added in the gap filled by interpolation
self.tags.add("fill_interpolation_secondPass")
if self.fiberID in self.classAttributes["interpolatedCenters"].keys():
# if there is more than one gap, postProcessing will select largest
self.classAttributes["interpolatedCenters"][self.fiberID].append(len(zFill))
else:
self.classAttributes["interpolatedCenters"][self.fiberID]=[len(zFill)]
return True,None
if len(zFill)>0 and createNewPoints:
xStart =self.x[-1]
xEnd =otherFiberObj.x[0]
yStart =self.y[-1]
yEnd =otherFiberObj.y[0]
#treating x and y coordinates as linear functions of z, i.e. x=f(z)=a*z+b
xFill =np.interp(zFill,[zStart,zEnd], [xStart, xEnd])
yFill =np.interp(zFill,[zStart,zEnd], [yStart, yEnd])
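            # Worked illustration with assumed numbers: for zStart=10, zEnd=14, xStart=100, xEnd=108,
            # zFill is [11, 12, 13] and np.interp returns xFill = [102, 104, 106], i.e. the gap is
            # filled by sampling the straight line x(z) joining the two known endpoints.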
            #check if the majority of the added segment is in the region segmented as fiber
testIfInSegt=[]
for i in range(len(zFill)):
points,fiberIDs=fiberObj.getPointsInSlice(int(zFill[i]))
if len(points)>0:
# insertion test to check nearest neighbor
queryPnt=np.array( [np.array([ xFill[i],yFill[i] ]) ] )
# TODO (would be nice) Here a new kdTree is created at each query, but this can be necessary if
# a previous filling() has added points in this slice
# Could be optimized to create new kdTree only if there is a change,
# but for probably slim performance gain
id_bottom_th,id_top_th,dist = knn(
queryPnt,
points,
self.classAttributes["collisionDistance"],
returnDist=True
)
if len(id_bottom_th)>0:
for iCollision,iPnt in enumerate(id_bottom_th):
print("\t\tcollision at imSlice={: >5.0f},\t(x,y)=({: >6.1f},{: >6.1f}), \tdistance={: >6.1f},\t between fibers:{} and {}".\
format(int(zFill[i]),xFill[i],yFill[i],dist[iCollision],fiberIDs[iPnt],self.fiberID))
return False,None #collision present, will not stitch fiberObj
if checkIfInSegt and len(zFill)>self.classAttributes["fillingNumberAlwaysAllowed"]:
testIfInSegt.append(fiberObj.checkIfPointIsInSegt(xFill[i],yFill[i],zFill[i]))
else:
# if gap shorter than fillingNumberAlwaysAllowed, always do filling
testIfInSegt.append(True)
# check if majority of potential filling points are in regions segmented as fiber
doFilling=False
if testIfInSegt.count(True)/len(testIfInSegt)>self.classAttributes["fillingFraction"]:
                # if any points are needed outside of V_fibers==True regions,
                # tag this fiberObj for post-processing: voxels also need to be added in the gap filled by interpolation
self.tags.add("fill_interpolation")
if self.fiberID in self.classAttributes["interpolatedCenters"].keys():
self.classAttributes["interpolatedCenters"][self.fiberID].append(len(zFill))
else:
self.classAttributes["interpolatedCenters"][self.fiberID]=[len(zFill)]
testIfInSegt=[True]*len(testIfInSegt)
doFilling=True
if doFilling:
self.x=np.append(self.x,xFill)
self.y=np.append(self.y,yFill)
self.z=np.append(self.z,zFill)
for iCt in range(len(zFill)):
#only the new interpolated points are appended, not the endpoints
self.appendTrackedCenterPoints(zFill[iCt],xFill[iCt],yFill[iCt],self.fiberID,self.rejected)
return True,zStart
else:
for i in range(len(zFill)):
if not testIfInSegt[i]:
print("\t\tfilling rejected between fibers: {},{}, imSlice={} because not in V_fibers==True regions,\t(x,y)=({: >6.1f},{: >6.1f})".\
format(self.fiberID,otherFiberObj.fiberID ,int(zFill[i]),xFill[i],yFill[i]))
return False,None
else:
return True, None # no need to add points, means start and endPnts are on adjacent slices. do blindstitch
def extendFiber(self,otherFiberObj,linkOrder,stitchingType,checkIfInSegt=True,createNewPoints=True,suffix=None,fibersAll=None,doTrimming=True):
if len(otherFiberObj.x) != len(otherFiberObj.y) or len(otherFiberObj.x) != len(otherFiberObj.z):
raise ValueError("inconsistent sizes for x, y, z")
iFib=linkOrder[0]
tFib=linkOrder[1]
#fill-in with centerPoints between self and otherFibObj so the watershed transform can be done
#collision test results in testIfInSegt
fillingSuccessful,zStart=self.filling(
otherFiberObj,
checkIfInSegt=checkIfInSegt,
createNewPoints=createNewPoints,
fibersAll=fibersAll,
doTrimming=doTrimming
)
if fillingSuccessful:
self.x=np.append(self.x,otherFiberObj.x)
self.y=np.append(self.y,otherFiberObj.y)
self.z=np.append(self.z,otherFiberObj.z)
if stitchingType=="blind":
labelStr_extended ="stitched_blind(extended)"
labelStr_added ="stitched_blind(added)"
elif stitchingType=="smart":
if self.fiberID in self.classAttributes["backTracking"].keys():
labelStr_extended="backTracking"
else:
labelStr_extended="stitched_smart(extended)"
labelStr_added="stitched_smart(added)"
elif stitchingType =="smart_transposed":
if self.fiberID in self.classAttributes["backTracking"].keys():
labelStr_extended="backTracking_transposed"
else:
labelStr_extended="stitched_smart(extended)_transposed"
labelStr_added="stitched_smart(added)_transposed"
elif stitchingType =="smart_lastPass":
if self.fiberID in self.classAttributes["backTracking"].keys():
labelStr_extended="backTracking_lastPass"
else:
labelStr_extended="stitched_smart(extended)_lastPass"
labelStr_added="stitched_smart(added)_lastPass"
self.setColor(labelStr_extended)
self.extendedBy.append(int(tFib)) # can be numpy.int32, from knn() implementation
self.extendedByObj[int(tFib)]=otherFiberObj
self.tags.add(labelStr_extended)
self.addLegendLabel(labelStr_extended)
otherFiberObj.setColor(labelStr_added)
if suffix is None:
otherFiberObj.addedTo.append(int(iFib))
else:
otherFiberObj.addedTo.append(int(iFib)+suffix)
if len(otherFiberObj.addedTo)>1:
raise RuntimeError("attempting to add segment to more than one fiber (branching in reverse, converging)")
otherFiberObj.tags.add(labelStr_added)
otherFiberObj.zOffset=True
self.addLegendLabel(labelStr_added)
if len(otherFiberObj.extendedBy)>0:
# preserve stitching chains
for extensionID in otherFiberObj.extendedBy:
if extensionID not in self.extendedBy:
self.extendedBy.append(extensionID)
self.extendedByObj[extensionID]=otherFiberObj.extendedByObj[extensionID]
if otherFiberObj.rejected and not self.rejected:
# move points from rejectedCenterPoints to trackedCenterPoints, as they are now part of a tracked fiber
# (fiberID will be removed from listFiberIDs_tracked on subsequent step)
otherFiberObj.restorePoints()
#change fiberID on trackedCenterPoints
for iSlice in otherFiberObj.z:
self.transferID(int(iSlice),otherFiberObj.fiberID,self.fiberID,False) # otherFiberObj.rejected=True, but centerPoints are in "tracked" object
# elif self.rejected and not otherFiberObj.rejected:
# the points will be handled correctly at processPointCloudToFiberObj
else:
#change fiberID on trackedCenterPoints
for iSlice in otherFiberObj.z:
self.transferID(int(iSlice),otherFiberObj.fiberID,self.fiberID,otherFiberObj.rejected)
if otherFiberObj.fiberID in self.classAttributes["listFiberIDs_tracked"]:
self.classAttributes["listFiberIDs_tracked"].remove(otherFiberObj.fiberID)
return fillingSuccessful
def setColor(self,colorLabel):
if type(colorLabel) is not str or colorLabel not in fiberObj.classAttributes["colors"].keys():
raise TypeError("colorLabel must be a string, corresponding to a key in fiberObj.classAttributes[\"colors\"]")
self.color=fiberObj.classAttributes["colors"][colorLabel]
self.colorLabel=colorLabel
self.classAttributes["LUT_fiberID_to_color"][self.fiberID]=colorLabel
def rejectPoints(self,pos=None):
if pos is None: #reject entire fiberObj (too short, too steep)
trimming=False
rejectRange = range(len(self.z))
if self.fiberID in self.classAttributes["listFiberIDs_tracked"]:
self.classAttributes["listFiberIDs_tracked"].remove(self.fiberID)
else: #reject only some points, from trimming (trimEndPnts)
trimming=True
rejectRange = [pos]
for iCt in rejectRange:
self.rejectTrackedCenterPoints(self.z[iCt],self.x[iCt],self.y[iCt],self.fiberID,rejected=self.rejected,trimming=trimming)
def restorePoints(self):
self.classAttributes["listFiberIDs_tracked"].add(self.fiberID)
for iCt in range(len(self.z)):
self.restoreRejectedPoints(self.z[iCt],self.x[iCt],self.y[iCt],self.fiberID)
def trimEndPoints(self,endPntOther=None):
if endPntOther is None:
startPnt=round(self.startPnt[2])
endPnt =round(self.endPnt[2])
trimStart=True
trimEnd =True
else:
# this is for the edgecase where smartStitching is done on
# fibers that have a common z end and startPnt
            endPnt=endPntOther-0.1 # so that z values equal to endPntOther are also trimmed in this case
trimStart=False
trimEnd =True
self.classAttributes["backTracking"][self.fiberID]=endPntOther
self.setColor("backTracking")
pos=0
while trimStart:
#remove points from trackedCenterPoints object
if self.z[pos] < startPnt:
self.rejectPoints(pos)
pos+=1
elif pos>self.classAttributes["maxTrimPoints"]:
print("trimming {} points on fiberID {}. something is wrong".format(pos,self.fiberID))
self.tags.add("trimmed_by_{}_points".format(pos))
trimStart=False
else:
trimStart=False
deleteIndicesStart=[i for i in range (pos)]
originalNumPnt=len(self.z)
pos=originalNumPnt-1
while trimEnd:
if self.z[pos] > endPnt:
self.rejectPoints(pos)
pos-=1
if pos<0:
raise RuntimeError("backtracking causes the trimming of entire fiberObj, should not happen")
elif originalNumPnt-pos>self.classAttributes["maxTrimPoints"]:
print("trimming {} points on fiberID {}. something is wrong".format(originalNumPnt-pos,self.fiberID))
self.tags.add("trimmed_by_{}_points".format(originalNumPnt-pos))
trimEnd=False
else:
trimEnd=False
deleteIndices=deleteIndicesStart+[i for i in range (pos+1,originalNumPnt)]
if len(deleteIndices)>0:
self.tags.add("trimmed")
print("\ttrimming endPoints on fiberObj: {} at positions: {}".format(self.fiberID,deleteIndices))
if "trimmedPoints" not in self.__dir__():
self.trimmedPoints={
"x":list(self.x[deleteIndices]),
"y":list(self.y[deleteIndices]),
"z":list(self.z[deleteIndices])
}
else:
self.trimmedPoints["x"].extend(list(self.x[deleteIndices]))
self.trimmedPoints["y"].extend(list(self.y[deleteIndices]))
self.trimmedPoints["z"].extend(list(self.z[deleteIndices]))
self.x=np.delete(self.x,deleteIndices)
self.y=np.delete(self.y,deleteIndices)
self.z=np.delete(self.z,deleteIndices)
@staticmethod
def findFarthestPnts(data,vv,sort=False):
""" find fartest points along principal direction"""
dataMean = data.mean(axis=0)
dist=np.zeros(len(data))
for iPnt in range(len(data)):
vecToPnt=data[iPnt,:]-dataMean
dist[iPnt]=np.dot(vv,vecToPnt)
#projection of dataPoints farthest from the mean onto the principal vector
endPntMin=dataMean+vv*np.dot(vv,data[dist.argmin(),:]-dataMean)
endPntMax=dataMean+vv*np.dot(vv,data[dist.argmax(),:]-dataMean)
if sort:
sorting_indices=np.argsort(dist)
return dataMean,endPntMin,endPntMax,sorting_indices
else:
return dataMean,endPntMin,endPntMax,None
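    # (Explanatory note, not original code: dist[iPnt] is the signed coordinate of each point
    #  along the principal direction vv, so endPntMin/endPntMax are the orthogonal projections
    #  of the two extreme points onto the line through the cloud mean along vv; together they
    #  bracket the fiber along its axis.)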
def checkSpread(self,maxSpread,verboseHandle=False):
if verboseHandle:
print("checkSpread for fiberID={} on {}".format(self.fiberID,multiprocessing.current_process().name))
distStart=10000.
distEnd =10000.
data = np.concatenate(
(
self.x[:, np.newaxis],
self.y[:, np.newaxis],
self.z[:, np.newaxis]
),
axis=1
)
trimList=[]
while distStart>maxSpread or distEnd>maxSpread:
# Calculate the mean of the points, i.e. the 'center' of the cloud
datamean = data.mean(axis=0)
# Do an SVD on the mean-centered data.
            # uu, dd, vv = np.linalg.svd(data - datamean) # for some mysterious reason, the numpy implementation sometimes won't converge
uu, dd, vv = scipy.linalg.svd(data - datamean)
self.meanPntCloud,startPnt,endPnt,sorting_indices=\
fiberObj.findFarthestPnts(data,vv[0])
distStart=np.linalg.norm(data[ 0,:]-startPnt)
distEnd =np.linalg.norm(data[-1,:]-endPnt)
if distStart>maxSpread or distEnd>maxSpread:
if self.rejected:
raise ValueError("checkSpread method is not implemented for rejected fibers")
if distStart>distEnd:
# remove first point
# keep list to remove from trackedCenterPoints. since this is a global class attribute, cannot be done in a parallel process due to GIL
trimList.append((self.z[0],self.x[0],self.y[0],self.fiberID))
self.x=self.x[1:]
self.y=self.y[1:]
self.z=self.z[1:]
data =data [1:]
else:
#remove last point
trimList.append((self.z[-1],self.x[-1],self.y[-1],self.fiberID))
self.x=self.x[:-1]
self.y=self.y[:-1]
self.z=self.z[:-1]
data =data [:-1]
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot3D(self.x,self.y,self.z,c="red")
# ax.plot3D([startPnt[0],endPnt[0]],[startPnt[1],endPnt[1]],[startPnt[2],endPnt[2]],c="blue")
# plt.show()
return trimList
def processPointCloudToFiberObj(self,minFiberLength,tagAngleTooSteep,maxSteepnessAngle,sort=False,doTrimming=True):
data = np.concatenate(
(
self.x[:, np.newaxis],
self.y[:, np.newaxis],
self.z[:, np.newaxis]
),
axis=1
)
# Calculate the mean of the points, i.e. the 'center' of the cloud
datamean = data.mean(axis=0)
# Do an SVD on the mean-centered data.
uu, dd, vv = np.linalg.svd(data - datamean)
self.meanPntCloud,startPnt,endPnt,sorting_indices=\
fiberObj.findFarthestPnts(data,vv[0],sort=sort)
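        # Sketch of the idea (illustrative only, values assumed): for mean-centered points the first
        # right-singular vector vv[0] of the SVD is the direction of largest variance, which serves
        # as the fiber axis. For points lying near a line, e.g.
        #     pts = np.array([[0., 0., 0.], [1., 1., 2.], [2., 2., 4.]])
        #     _, _, vv = np.linalg.svd(pts - pts.mean(axis=0))
        # vv[0] comes out proportional to (1, 1, 2) (up to sign).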
if sort: #used in fiberObj.combine(), because the points will be non-monotonical
self.x=self.x[sorting_indices]
self.y=self.y[sorting_indices]
self.z=self.z[sorting_indices]
# #TODO: would be nice: unstitching for spread too large
# #if spread is too large, stitching should be reversed
# lineEndPnts_lateralSpread=\
# findFarthestPnts(data,vv[1])[1]
# spreadLength=np.linalg.norm(lineEndPnts_lateralSpread[:,1]-lineEndPnts_lateralSpread[:,0])
# if spreadLength>20:
# print("object should be unstitched")
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot3D(self.x,self.y,self.z,c="red")
# # ax.plot3D(lineEndPnts[0,:],lineEndPnts[1,:],lineEndPnts[2,:],c="blue")
# plt.show()
# these points are not in the original datacloud:
# they are projections of the points furthest from mean onto principal vector
if startPnt[2]>endPnt[2]:
#sometimes the endpoints are flipped
buffer=startPnt
startPnt=endPnt
endPnt=buffer
self.startPnt=startPnt
self.endPnt=endPnt
orientationVec=endPnt-startPnt
self.orientationVec=orientationVec
totalLength=np.linalg.norm(orientationVec)
self.totalLength=totalLength
self.oriVec_normalized=orientationVec/totalLength
rejectedCache=self.rejected
tooShort=False
# Mark fibers that are suspiciously short
if totalLength < minFiberLength:
self.setColor("too short")
self.tags.add("length.LT.{: >6.4f}".format(minFiberLength))
self.addLegendLabel("too short")
self.rejected=True
self.rejectPoints()
tooShort=True
for extendedFibObj in self.extendedByObj.values():
extendedFibObj.rejected=True
if extendedFibObj.fiberID in self.classAttributes["listFiberIDs_tracked"]:
self.classAttributes["listFiberIDs_tracked"].remove(extendedFibObj.fiberID)
if "initialObj" in self.__dir__():
if len(self.initialObj)>1:
raise ValueError("can't be more than one initialObj")
for fibObj in self.initialObj.values():
fibObj.rejected=True
else:
self.rejected=False
if tagAngleTooSteep:
dotProd=np.dot(orientationVec/totalLength,[0.,0.,1.])
if dotProd<np.cos(maxSteepnessAngle):
self.setColor("too steep")
self.tags.add(
"tooSteep_angle={: >6.4f}".format(
np.degrees(np.arccos(dotProd))))
self.addLegendLabel("too steep")
self.rejected=True
self.rejectPoints()
for extendedFibObj in self.extendedByObj.values():
extendedFibObj.rejected=True
if extendedFibObj.fiberID in self.classAttributes["listFiberIDs_tracked"]:
self.classAttributes["listFiberIDs_tracked"].remove(extendedFibObj.fiberID)
if "initialObj" in self.__dir__():
if len(self.initialObj)>1:
raise ValueError("can't be more than one initialObj")
for fibObj in self.initialObj.values():
fibObj.rejected=True
elif not tooShort:
self.rejected=False
if rejectedCache and not self.rejected:
#used to be rejected but now is long enough
self.restorePoints()
for extendedFibObj in self.extendedByObj.values():
extendedFibObj.rejected=False
if "initialObj" in self.__dir__():
if len(self.initialObj)>1:
raise ValueError("can't be more than one initialObj")
for fibObj in self.initialObj.values():
fibObj.rejected=False
        # reject points that are beyond startPnt and endPnt in the z direction
        # this is sometimes a consequence of knn stitching points that
        # lie along different real fibers (they will be at an angle)
        # this is an edge case that can sometimes cause problems at smartStitching
        # IMPORTANT: keep this step last, or trimmed points may end up in the wrong ("tracked"/"rejected") set;
        # if restorePoints() or rejectPoints() is applied afterwards, they won't get removed, and will cause collisions
        # on the following stitching
if doTrimming:
#doTrimming will be False when smartStitching_transpose, as self.z will not increase monotonically
self.trimEndPoints()
def transpose(self,permutationVec):
if permutationVec=="123":
#add suffix to differentiate between origin referentials
self.fiberID =self.fiberID+0.123
self.suffix =0.123
if permutationVec=="132":
temp=self.y
self.y=self.z
self.z=temp
self.endPnt =self.endPnt [[0,2,1]]
self.startPnt =self.startPnt [[0,2,1]]
self.meanPntCloud =self.meanPntCloud [[0,2,1]]
self.orientationVec=self.orientationVec [[0,2,1]]
# add suffix to differentiate between origin referentials
self.fiberID =self.fiberID+0.132
self.suffix =0.132
if permutationVec=="321":
temp=self.x
self.x=self.z
self.z=temp
self.endPnt =self.endPnt [[2,1,0]]
self.startPnt =self.startPnt [[2,1,0]]
self.meanPntCloud =self.meanPntCloud [[2,1,0]]
self.orientationVec=self.orientationVec [[2,1,0]]
# add suffix to differentiate between origin referentials
self.fiberID =self.fiberID+0.321
self.suffix =0.321
self.tags.add("transposed_from:{}".format(permutationVec))
        # so stitches at the combine() stage are stored separately from those of the tracking() stage
self.extendedByFirstPass=self.extendedBy
self.extendedByObjFirstPass=self.extendedByObj
self.extendedBy=[]
self.extendedByObj={}
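    # (Explanatory note, not original code: the fractional suffix 0.123/0.132/0.321 encodes which
    #  axis permutation a fiber was tracked in, e.g. a fiberID of 57 tracked in the "132"
    #  referential becomes 57.132, so fibers from different referentials stay distinguishable
    #  after merging.)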
def combine(self,otherFiberObj):
self.x=np.append(self.x,otherFiberObj.x)
self.y=np.append(self.y,otherFiberObj.y)
self.z=np.append(self.z,otherFiberObj.z)
self.processPointCloudToFiberObj(10.,False,None,sort=True,doTrimming=False)
if "combinedWith" in self.__dir__():
self.combinedWith .append(otherFiberObj.fiberID)
self.combinedWithObj.append(otherFiberObj)
else:
self.combinedWith =[otherFiberObj.fiberID]
self.combinedWithObj=[otherFiberObj]
if "combinedWith" in otherFiberObj.__dir__():
otherFiberObj.combinedWith .append(self.fiberID)
otherFiberObj.combinedWithObj.append(self)
else:
otherFiberObj.combinedWith =[self.fiberID]
otherFiberObj.combinedWithObj=[self]
| 42.98374
| 161
| 0.586265
|
b8c46b6590abb462c6d5d78beffc0c0b2d8e955b
| 136
|
py
|
Python
|
src/testAdministrator/views.py
|
perrons93/ots
|
6102cc5e2e7a927036ab71fb719bada0fd225f7f
|
[
"MIT"
] | null | null | null |
src/testAdministrator/views.py
|
perrons93/ots
|
6102cc5e2e7a927036ab71fb719bada0fd225f7f
|
[
"MIT"
] | null | null | null |
src/testAdministrator/views.py
|
perrons93/ots
|
6102cc5e2e7a927036ab71fb719bada0fd225f7f
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def splash_page(request):
return render(request, 'splashpage.html')
| 19.428571
| 45
| 0.764706
|
d6a949f079738f83fe6ef4a0b0854e3d23c2e607
| 1,132
|
py
|
Python
|
flink-python/pyflink/version.py
|
sbairos/flink
|
0799b5c20a127110e47439668cf8f8db2e4ecbf3
|
[
"Apache-2.0"
] | 16
|
2019-09-24T02:28:08.000Z
|
2021-06-22T09:16:05.000Z
|
flink-python/pyflink/version.py
|
sbairos/flink
|
0799b5c20a127110e47439668cf8f8db2e4ecbf3
|
[
"Apache-2.0"
] | 15
|
2021-06-13T18:06:12.000Z
|
2022-02-09T22:40:04.000Z
|
flink-python/pyflink/version.py
|
sbairos/flink
|
0799b5c20a127110e47439668cf8f8db2e4ecbf3
|
[
"Apache-2.0"
] | 7
|
2020-04-13T02:24:44.000Z
|
2021-08-06T10:06:26.000Z
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
The pyflink version will be consistent with the flink version and follow the PEP440.
.. seealso:: https://www.python.org/dev/peps/pep-0440
"""
__version__ = "1.12.dev0"
| 47.166667
| 84
| 0.647527
|
fa557d04474375ef9fcdbe5f2d1be4a6a33613b5
| 29,261
|
py
|
Python
|
kapitan/cli.py
|
chids/kapitan
|
ac380c48d0638c3734adb991c38fae8c24a5681a
|
[
"Apache-2.0"
] | null | null | null |
kapitan/cli.py
|
chids/kapitan
|
ac380c48d0638c3734adb991c38fae8c24a5681a
|
[
"Apache-2.0"
] | null | null | null |
kapitan/cli.py
|
chids/kapitan
|
ac380c48d0638c3734adb991c38fae8c24a5681a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2019 The Kapitan Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"command line module"
from __future__ import print_function
import argparse
import base64
import json
import logging
import os
import sys
import yaml
from kapitan.errors import KapitanError, RefHashMismatchError
from kapitan.initialiser import initialise_skeleton
from kapitan.lint import start_lint
from kapitan.refs.base import Ref, RefController, Revealer
from kapitan.refs.secrets.awskms import AWSKMSSecret
from kapitan.refs.secrets.gkms import GoogleKMSSecret
from kapitan.refs.secrets.gpg import GPGSecret, lookup_fingerprints
from kapitan.resources import (inventory_reclass, resource_callbacks,
search_imports)
from kapitan.targets import compile_targets
from kapitan.inputs.jinja2_filters import default_jinja2_filters_path
from kapitan.utils import (PrettyDumper, check_version, deep_get, fatal_error,
flatten_dict, from_dot_kapitan, jsonnet_file,
search_target_token_paths, searchvar)
from kapitan.version import DESCRIPTION, PROJECT_NAME, VERSION
logger = logging.getLogger(__name__)
def main():
"""main function for command line usage"""
parser = argparse.ArgumentParser(prog=PROJECT_NAME,
description=DESCRIPTION)
parser.add_argument('--version', action='version', version=VERSION)
subparser = parser.add_subparsers(help="commands")
eval_parser = subparser.add_parser('eval', help='evaluate jsonnet file')
eval_parser.add_argument('jsonnet_file', type=str)
eval_parser.add_argument('--output', type=str,
choices=('yaml', 'json'),
default=from_dot_kapitan('eval', 'output', 'yaml'),
help='set output format, default is "yaml"')
eval_parser.add_argument('--vars', type=str,
default=from_dot_kapitan('eval', 'vars', []),
nargs='*',
metavar='VAR',
help='set variables')
eval_parser.add_argument('--search-paths', '-J', type=str, nargs='+',
default=from_dot_kapitan('eval', 'search-paths', ['.']),
metavar='JPATH',
help='set search paths, default is ["."]')
compile_parser = subparser.add_parser('compile', help='compile targets')
compile_parser.add_argument('--search-paths', '-J', type=str, nargs='+',
default=from_dot_kapitan('compile', 'search-paths', ['.', 'lib']),
metavar='JPATH',
help='set search paths, default is ["."]')
compile_parser.add_argument('--jinja2-filters', '-J2F', type=str,
default=from_dot_kapitan('compile', 'jinja2-filters',
default_jinja2_filters_path),
metavar='FPATH',
                                help='load custom jinja2 filters from any file, default is to put '
                                     'them inside lib/jinja2_filters.py')
compile_parser.add_argument('--verbose', '-v', help='set verbose mode',
action='store_true',
default=from_dot_kapitan('compile', 'verbose', False))
compile_parser.add_argument('--prune', help='prune jsonnet output',
action='store_true',
default=from_dot_kapitan('compile', 'prune', False))
compile_parser.add_argument('--quiet', help='set quiet mode, only critical output',
action='store_true',
default=from_dot_kapitan('compile', 'quiet', False))
compile_parser.add_argument('--output-path', type=str,
default=from_dot_kapitan('compile', 'output-path', '.'),
metavar='PATH',
help='set output path, default is "."')
compile_parser.add_argument('--targets', '-t', help='targets to compile, default is all',
type=str, nargs='+',
default=from_dot_kapitan('compile', 'targets', []),
metavar='TARGET')
compile_parser.add_argument('--parallelism', '-p', type=int,
default=from_dot_kapitan('compile', 'parallelism', 4),
metavar='INT',
help='Number of concurrent compile processes, default is 4')
compile_parser.add_argument('--indent', '-i', type=int,
default=from_dot_kapitan('compile', 'indent', 2),
metavar='INT',
help='Indentation spaces for YAML/JSON, default is 2')
compile_parser.add_argument('--secrets-path', help='set secrets path, default is "./secrets"',
default=from_dot_kapitan('compile', 'secrets-path', './secrets'))
compile_parser.add_argument('--reveal',
help='reveal secrets (warning: this will write sensitive data)',
action='store_true',
default=from_dot_kapitan('compile', 'reveal', False))
compile_parser.add_argument('--inventory-path',
default=from_dot_kapitan('compile', 'inventory-path', './inventory'),
help='set inventory path, default is "./inventory"')
compile_parser.add_argument('--cache', '-c',
help='enable compilation caching to .kapitan_cache, default is False',
action='store_true',
default=from_dot_kapitan('compile', 'cache', False))
compile_parser.add_argument('--cache-paths', type=str, nargs='+',
default=from_dot_kapitan('compile', 'cache-paths', []),
metavar='PATH',
help='cache additional paths to .kapitan_cache, default is []')
compile_parser.add_argument('--ignore-version-check',
help='ignore the version from .kapitan',
action='store_true',
default=from_dot_kapitan('compile', 'ignore-version-check', False))
inventory_parser = subparser.add_parser('inventory', help='show inventory')
inventory_parser.add_argument('--target-name', '-t',
default=from_dot_kapitan('inventory', 'target-name', ''),
help='set target name, default is all targets')
inventory_parser.add_argument('--inventory-path',
default=from_dot_kapitan('inventory', 'inventory-path', './inventory'),
help='set inventory path, default is "./inventory"')
inventory_parser.add_argument('--flat', '-F', help='flatten nested inventory variables',
action='store_true',
default=from_dot_kapitan('inventory', 'flat', False))
inventory_parser.add_argument('--pattern', '-p',
default=from_dot_kapitan('inventory', 'pattern', ''),
help='filter pattern (e.g. parameters.mysql.storage_class, or storage_class,' +
' or storage_*), default is ""')
inventory_parser.add_argument('--verbose', '-v', help='set verbose mode',
action='store_true',
default=from_dot_kapitan('inventory', 'verbose', False))
searchvar_parser = subparser.add_parser('searchvar',
help='show all inventory files where var is declared')
searchvar_parser.add_argument('searchvar', type=str, metavar='VARNAME',
help='e.g. parameters.mysql.storage_class, or storage_class, or storage_*')
searchvar_parser.add_argument('--inventory-path',
default=from_dot_kapitan('searchvar', 'inventory-path', './inventory'),
help='set inventory path, default is "./inventory"')
searchvar_parser.add_argument('--verbose', '-v', help='set verbose mode',
action='store_true',
default=from_dot_kapitan('searchvar', 'verbose', False))
searchvar_parser.add_argument('--pretty-print', '-p', help='Pretty print content of var',
action='store_true',
default=from_dot_kapitan('searchvar', 'pretty-print', False))
secrets_parser = subparser.add_parser('secrets', help='manage secrets')
secrets_parser.add_argument('--write', '-w', help='write secret token',
metavar='TOKENNAME',)
secrets_parser.add_argument('--update', help='update recipients for secret token',
metavar='TOKENNAME',)
secrets_parser.add_argument('--update-targets', action='store_true',
default=from_dot_kapitan('secrets', 'update-targets', False),
help='update target secrets')
secrets_parser.add_argument('--validate-targets', action='store_true',
default=from_dot_kapitan('secrets', 'validate-targets', False),
help='validate target secrets')
secrets_parser.add_argument('--base64', '-b64', help='base64 encode file content',
action='store_true',
default=from_dot_kapitan('secrets', 'base64', False))
secrets_parser.add_argument('--reveal', '-r', help='reveal secrets',
action='store_true',
default=from_dot_kapitan('secrets', 'reveal', False))
secrets_parser.add_argument('--file', '-f', help='read file or directory, set "-" for stdin',
metavar='FILENAME')
secrets_parser.add_argument('--target-name', '-t', help='grab recipients from target name')
secrets_parser.add_argument('--inventory-path',
default=from_dot_kapitan('secrets', 'inventory-path', './inventory'),
help='set inventory path, default is "./inventory"')
secrets_parser.add_argument('--recipients', '-R', help='set GPG recipients',
type=str, nargs='+',
default=from_dot_kapitan('secrets', 'recipients', []),
metavar='RECIPIENT')
secrets_parser.add_argument('--key', '-K', help='set KMS key',
default=from_dot_kapitan('secrets', 'key', ''),
metavar='KEY')
secrets_parser.add_argument('--secrets-path', help='set secrets path, default is "./secrets"',
default=from_dot_kapitan('secrets', 'secrets-path', './secrets'))
secrets_parser.add_argument('--verbose', '-v',
help='set verbose mode (warning: this will show sensitive data)',
action='store_true',
default=from_dot_kapitan('secrets', 'verbose', False))
lint_parser = subparser.add_parser('lint', help='linter for inventory and secrets')
lint_parser.add_argument('--fail-on-warning',
default=from_dot_kapitan('lint', 'fail-on-warning', False),
action='store_true',
help='exit with failure code if warnings exist, default is False')
lint_parser.add_argument('--skip-class-checks',
action='store_true',
help='skip checking for unused classes, default is False',
default=from_dot_kapitan('lint', 'skip-class-checks', False))
lint_parser.add_argument('--skip-yamllint',
action='store_true',
help='skip running yamllint on inventory, default is False',
default=from_dot_kapitan('lint', 'skip-yamllint', False))
lint_parser.add_argument('--search-secrets',
default=from_dot_kapitan('lint', 'search-secrets', False),
action='store_true',
help='searches for plaintext secrets in inventory, default is False')
lint_parser.add_argument('--secrets-path',
help='set secrets path, default is "./secrets"',
default=from_dot_kapitan('lint', 'secrets-path', './secrets'))
lint_parser.add_argument('--compiled-path',
default=from_dot_kapitan('lint', 'compiled-path', './compiled'),
help='set compiled path, default is "./compiled"')
lint_parser.add_argument('--inventory-path',
default=from_dot_kapitan('lint', 'inventory-path', './inventory'),
help='set inventory path, default is "./inventory"')
init_parser = subparser.add_parser('init', help='initialize a directory with the recommended kapitan project skeleton.')
init_parser.add_argument('--directory',
default=from_dot_kapitan('init', 'directory', '.'),
help='set path, in which to generate the project skeleton, assumes directory already exists. default is "./"')
args = parser.parse_args()
logger.debug('Running with args: %s', args)
try:
cmd = sys.argv[1]
except IndexError:
parser.print_help()
sys.exit(1)
if hasattr(args, 'verbose') and args.verbose:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
elif hasattr(args, 'quiet') and args.quiet:
logging.basicConfig(level=logging.CRITICAL, format="%(message)s")
else:
logging.basicConfig(level=logging.INFO, format="%(message)s")
if cmd == 'eval':
file_path = args.jsonnet_file
search_paths = [os.path.abspath(path) for path in args.search_paths]
ext_vars = {}
if args.vars:
ext_vars = dict(var.split('=') for var in args.vars)
json_output = None
def _search_imports(cwd, imp):
return search_imports(cwd, imp, search_paths)
json_output = jsonnet_file(file_path, import_callback=_search_imports,
native_callbacks=resource_callbacks(search_paths),
ext_vars=ext_vars)
if args.output == 'yaml':
json_obj = json.loads(json_output)
yaml.safe_dump(json_obj, sys.stdout, default_flow_style=False)
elif json_output:
print(json_output)
elif cmd == 'compile':
search_paths = [os.path.abspath(path) for path in args.search_paths]
if not args.ignore_version_check:
check_version()
ref_controller = RefController(args.secrets_path)
compile_targets(args.inventory_path, search_paths, args.output_path,
args.parallelism, args.targets, ref_controller,
prune=(args.prune), indent=args.indent, reveal=args.reveal,
cache=args.cache, cache_paths=args.cache_paths,
jinja2_filters=args.jinja2_filters)
elif cmd == 'inventory':
if args.pattern and args.target_name == '':
parser.error("--pattern requires --target_name")
try:
inv = inventory_reclass(args.inventory_path)
if args.target_name != '':
inv = inv['nodes'][args.target_name]
if args.pattern != '':
pattern = args.pattern.split(".")
inv = deep_get(inv, pattern)
if args.flat:
inv = flatten_dict(inv)
yaml.dump(inv, sys.stdout, width=10000, default_flow_style=False)
else:
yaml.dump(inv, sys.stdout, Dumper=PrettyDumper, default_flow_style=False)
except Exception as e:
if not isinstance(e, KapitanError):
logger.exception("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
sys.exit(1)
elif cmd == 'searchvar':
searchvar(args.searchvar, args.inventory_path, args.pretty_print)
elif cmd == 'lint':
start_lint(args.fail_on_warning, args.skip_class_checks, args.skip_yamllint, args.inventory_path, args.search_secrets, args.secrets_path, args.compiled_path)
elif cmd == 'init':
initialise_skeleton(args.directory)
elif cmd == 'secrets':
ref_controller = RefController(args.secrets_path)
if args.write is not None:
secret_write(args, ref_controller)
elif args.reveal:
secret_reveal(args, ref_controller)
elif args.update:
secret_update(args, ref_controller)
elif args.update_targets or args.validate_targets:
secret_update_validate(args, ref_controller)
def secret_write(args, ref_controller):
"Write secret to ref_controller based on cli args"
token_name = args.write
file_name = args.file
data = None
if file_name is None:
fatal_error('--file is required with --write')
if file_name == '-':
data = ''
for line in sys.stdin:
data += line
else:
with open(file_name) as fp:
data = fp.read()
if token_name.startswith("gpg:"):
type_name, token_path = token_name.split(":")
recipients = [dict((("name", name),)) for name in args.recipients]
if args.target_name:
inv = inventory_reclass(args.inventory_path)
kap_inv_params = inv['nodes'][args.target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(args.target_name))
recipients = kap_inv_params['secrets']['gpg']['recipients']
if not recipients:
raise KapitanError("No GPG recipients specified. Use --recipients or specify them in " +
"parameters.kapitan.secrets.gpg.recipients and use --target")
secret_obj = GPGSecret(data, recipients, encode_base64=args.base64)
tag = '?{{gpg:{}}}'.format(token_path)
ref_controller[tag] = secret_obj
elif token_name.startswith("gkms:"):
type_name, token_path = token_name.split(":")
key = args.key
if args.target_name:
inv = inventory_reclass(args.inventory_path)
kap_inv_params = inv['nodes'][args.target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(args.target_name))
key = kap_inv_params['secrets']['gkms']['key']
if not key:
raise KapitanError("No KMS key specified. Use --key or specify it in parameters.kapitan.secrets.gkms.key and use --target")
secret_obj = GoogleKMSSecret(data, key, encode_base64=args.base64)
tag = '?{{gkms:{}}}'.format(token_path)
ref_controller[tag] = secret_obj
elif token_name.startswith("awskms:"):
type_name, token_path = token_name.split(":")
key = args.key
if args.target_name:
inv = inventory_reclass(args.inventory_path)
kap_inv_params = inv['nodes'][args.target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(args.target_name))
key = kap_inv_params['secrets']['awskms']['key']
if not key:
raise KapitanError("No KMS key specified. Use --key or specify it in parameters.kapitan.secrets.awskms.key and use --target")
secret_obj = AWSKMSSecret(data, key, encode_base64=args.base64)
tag = '?{{awskms:{}}}'.format(token_path)
ref_controller[tag] = secret_obj
elif token_name.startswith("ref:"):
type_name, token_path = token_name.split(":")
_data = data.encode()
encoding = 'original'
if args.base64:
_data = base64.b64encode(_data).decode()
_data = _data.encode()
encoding = 'base64'
ref_obj = Ref(_data, encoding=encoding)
tag = '?{{ref:{}}}'.format(token_path)
ref_controller[tag] = ref_obj
else:
fatal_error("Invalid token: {name}. Try using gpg/gkms/awskms/ref:{name}".format(name=token_name))
def secret_update(args, ref_controller):
"Update secret gpg recipients/gkms/awskms key"
# TODO --update *might* mean something else for other types
token_name = args.update
if token_name.startswith("gpg:"):
# args.recipients is a list, convert to recipients dict
recipients = [dict([("name", name), ]) for name in args.recipients]
if args.target_name:
inv = inventory_reclass(args.inventory_path)
kap_inv_params = inv['nodes'][args.target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(args.target_name))
recipients = kap_inv_params['secrets']['gpg']['recipients']
if not recipients:
raise KapitanError("No GPG recipients specified. Use --recipients or specify them in " +
"parameters.kapitan.secrets.gpg.recipients and use --target")
type_name, token_path = token_name.split(":")
tag = '?{{gpg:{}}}'.format(token_path)
secret_obj = ref_controller[tag]
secret_obj.update_recipients(recipients)
ref_controller[tag] = secret_obj
elif token_name.startswith("gkms:"):
key = args.key
if args.target_name:
inv = inventory_reclass(args.inventory_path)
kap_inv_params = inv['nodes'][args.target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(args.target_name))
key = kap_inv_params['secrets']['gkms']['key']
if not key:
raise KapitanError("No KMS key specified. Use --key or specify it in parameters.kapitan.secrets.gkms.key and use --target")
type_name, token_path = token_name.split(":")
tag = '?{{gkms:{}}}'.format(token_path)
secret_obj = ref_controller[tag]
secret_obj.update_key(key)
ref_controller[tag] = secret_obj
elif token_name.startswith("awskms:"):
key = args.key
if args.target_name:
inv = inventory_reclass(args.inventory_path)
kap_inv_params = inv['nodes'][args.target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(args.target_name))
key = kap_inv_params['secrets']['awskms']['key']
if not key:
raise KapitanError("No KMS key specified. Use --key or specify it in parameters.kapitan.secrets.awskms.key and use --target")
type_name, token_path = token_name.split(":")
tag = '?{{awskms:{}}}'.format(token_path)
secret_obj = ref_controller[tag]
secret_obj.update_key(key)
ref_controller[tag] = secret_obj
else:
fatal_error("Invalid token: {name}. Try using gpg/gkms/awskms:{name}".format(name=token_name))
def secret_reveal(args, ref_controller):
"Reveal secrets in file_name"
revealer = Revealer(ref_controller)
file_name = args.file
if file_name is None:
fatal_error('--file is required with --reveal')
try:
if file_name == '-':
out = revealer.reveal_raw_file(None)
sys.stdout.write(out)
elif file_name:
for rev_obj in revealer.reveal_path(file_name):
sys.stdout.write(rev_obj.content)
except (RefHashMismatchError, KeyError):
raise KapitanError("Reveal failed for file {name}".format(name=file_name))
def secret_update_validate(args, ref_controller):
"Validate and/or update target secrets"
# update gpg recipients/gkms/awskms key for all secrets in secrets_path
# use --secrets-path to set scanning path
inv = inventory_reclass(args.inventory_path)
targets = set(inv['nodes'].keys())
secrets_path = os.path.abspath(args.secrets_path)
target_token_paths = search_target_token_paths(secrets_path, targets)
ret_code = 0
for target_name, token_paths in target_token_paths.items():
kap_inv_params = inv['nodes'][target_name]['parameters']['kapitan']
if 'secrets' not in kap_inv_params:
raise KapitanError("parameters.kapitan.secrets not defined in {}".format(target_name))
try:
recipients = kap_inv_params['secrets']['gpg']['recipients']
except KeyError:
recipients = None
try:
gkey = kap_inv_params['secrets']['gkms']['key']
except KeyError:
gkey = None
try:
awskey = kap_inv_params['secrets']['awskms']['key']
except KeyError:
awskey = None
for token_path in token_paths:
if token_path.startswith("?{gpg:"):
if not recipients:
logger.debug("secret_update_validate: target: %s has no inventory gpg recipients, skipping %s", target_name, token_path)
continue
secret_obj = ref_controller[token_path]
target_fingerprints = set(lookup_fingerprints(recipients))
secret_fingerprints = set(lookup_fingerprints(secret_obj.recipients))
if target_fingerprints != secret_fingerprints:
if args.validate_targets:
logger.info("%s recipient mismatch", token_path)
to_remove = secret_fingerprints.difference(target_fingerprints)
to_add = target_fingerprints.difference(secret_fingerprints)
if to_remove:
logger.info("%s needs removal", to_remove)
if to_add:
logger.info("%s needs addition", to_add)
ret_code = 1
else:
new_recipients = [dict([("fingerprint", f), ]) for f in target_fingerprints]
secret_obj.update_recipients(new_recipients)
ref_controller[token_path] = secret_obj
elif token_path.startswith("?{gkms:"):
if not gkey:
logger.debug("secret_update_validate: target: %s has no inventory gkms key, skipping %s", target_name, token_path)
continue
secret_obj = ref_controller[token_path]
if gkey != secret_obj.key:
if args.validate_targets:
logger.info("%s key mismatch", token_path)
ret_code = 1
else:
secret_obj.update_key(gkey)
ref_controller[token_path] = secret_obj
elif token_path.startswith("?{awskms:"):
if not awskey:
logger.debug("secret_update_validate: target: %s has no inventory awskms key, skipping %s", target_name, token_path)
continue
secret_obj = ref_controller[token_path]
if awskey != secret_obj.key:
if args.validate_targets:
logger.info("%s key mismatch", token_path)
ret_code = 1
else:
secret_obj.update_key(awskey)
ref_controller[token_path] = secret_obj
else:
logger.info("Invalid secret %s, could not get type, skipping", token_path)
ret_code = 1
sys.exit(ret_code)
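# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Simulates a command-line invocation of the argparse tree built in main();
# the target name and project layout are assumptions (a kapitan project with
# an inventory and a target called "my-target" is assumed to exist).
def _example_cli_invocation():
    sys.argv = ["kapitan", "compile", "--targets", "my-target",
                "--inventory-path", "./inventory"]
    main()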
| 51.789381
| 165
| 0.578654
|
fa61b9be238affb2ae59138f2b90d19f34321477
| 246
|
py
|
Python
|
apiempleados/serializers.py
|
acroooo/registroempleados-spa
|
77310967ef10dc769fc7ab60d51dfbb19504ff9e
|
[
"MIT"
] | null | null | null |
apiempleados/serializers.py
|
acroooo/registroempleados-spa
|
77310967ef10dc769fc7ab60d51dfbb19504ff9e
|
[
"MIT"
] | null | null | null |
apiempleados/serializers.py
|
acroooo/registroempleados-spa
|
77310967ef10dc769fc7ab60d51dfbb19504ff9e
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import *
class EmpleadoSerializer(serializers.ModelSerializer):
class Meta:
model = Empleado
fields = ('id', 'nombre_completo', 'email', 'contacto', 'direccion')
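# --- Illustrative sketch (hypothetical, not part of the original file) -----
# Shows how the serializer above turns a model instance into a plain dict;
# assumes at least one Empleado row exists in the database.
def _example_serialize_first_empleado():
    empleado = Empleado.objects.first()
    return EmpleadoSerializer(empleado).data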
| 22.363636
| 76
| 0.678862
|
cd20d67d9a3c41ad13867ff0a6fdff890f2f16c2
| 8,743
|
py
|
Python
|
python/src/experiments_v2/greedy_agent_utils.py
|
jotaporras/ts_mcfrl
|
c8c77a8fbd58e80e926e6705320ca8bc1979efdd
|
[
"MIT"
] | null | null | null |
python/src/experiments_v2/greedy_agent_utils.py
|
jotaporras/ts_mcfrl
|
c8c77a8fbd58e80e926e6705320ca8bc1979efdd
|
[
"MIT"
] | 5
|
2020-09-26T01:26:21.000Z
|
2022-02-10T02:45:51.000Z
|
python/src/experiments_v2/greedy_agent_utils.py
|
jotaporras/ts_mcfrl
|
c8c77a8fbd58e80e926e6705320ca8bc1979efdd
|
[
"MIT"
] | null | null | null |
import logging
import random
from typing import Tuple
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from envs import network_flow_env_builder
from pytorch_lightning.loggers import WandbLogger
from shipping_allocation.envs.network_flow_env import (
EnvironmentParameters,
ShippingFacilityEnvironment,
)
from torch import Tensor
from torch.optim import Adam, Optimizer
from torch.utils.data import DataLoader
import agents
from agents import Agent
from dqn.dqn_common import ShippingFacilityEpisodesDataset
from experiment_utils import report_generator
from experiments_v2.ptl_callbacks import (
MyPrintingCallback,
WandbDataUploader,
ShippingFacilityEnvironmentStorageCallback,
)
# Num epochs == num EPs.
class GreedyAgentRLModel(pl.LightningModule):
"""
This runner is used for greedy agents or agents that
don't need to use the PTL functions for updating a neural network.
"""
environment_parameters: EnvironmentParameters
agent: Agent
DEBUG = False
def __init__(
self,
agent,
        env,  # TODO: ShippingAssignmentEnvironment
experiment_name="",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.agent = agent
self.env = env
self.physical_network = self.env.physical_network
self.experiment_name = experiment_name
# Running values
self.state = self.env.reset()
self.done = False
self.episode_counter = 0
# Metrics
self.episode_reward = 0.0
self.running_reward = 0.0
self.actions = []
self.episode_rewards = []
self.info = {}
self.episodes_info = []
# debug var for env reset
self.was_reset = True
def forward(self, *args, **kwargs):
pass # do nothing related to NNs.
def training_step(self, step_info: Tuple[int, int, int], num_batch):
"""
        A single simulation step. step_info is a tuple of three integers;
        see ShippingFacilityEpisodesDataset for the specification.
Args:
step_info: (step, num_order, ep_start)
num_batch:
Returns:
"""
step, order, ep_start = step_info
logging.debug("Getting into training step")
if ep_start:
logging.info(f"Starting episode {self.episode_counter}")
if not self.was_reset:
logging.error("ERROR!!! EXPECTED ENV TO BE RESET.")
else:
self.was_reset = False
action = self.agent.get_action(self.state)
# the agent observes the first state and chooses an action
# environment steps with the agent's action and returns new state and reward
next_state, reward, done, info = self.env.step(action)
# print(f"Got reward {reward} done {done}")
self.agent.train((self.state, action, next_state, reward, done))
self.state = next_state
self.episode_reward += reward
if done:
# update the info to store the reports
self.info = info
# Render the current state of the environment
self.env.render()
self.actions.append(action)
self.episode_rewards.append(reward)
shim = (
torch.ones(2, 2, requires_grad=True) - 1
).sum() # a dummy operation to trick ptl
# result = pl.TrainResult(
# minimize=shim
# ) # use the train result just for logging purposes.
self.log("reward", reward)
self.log("episode_reward", self.episode_reward)
self.log("episodes", self.episode_counter)
return shim
def training_epoch_end(self, outputs):
"""
This is triggered when the greedy dataset reaches the end of an episode.
Args:
outputs:
Returns:
"""
logging.info(f"Finishing episode {self.episode_counter}")
# Finished one episode, store reports
logging.info("Finished episode, storing information")
self.episodes_info.append(self.info)
self._wandb_custom_metrics(self.info)
self.episode_counter += 1
self._reset_env_and_metrics()
# return outputs
def _reset_env_and_metrics(self):
logging.info(
f"=========== starting episode {self.episode_counter} loop ==========="
)
logging.debug("Initial environment: ")
self.env.render()
self.state = self.env.reset()
self.done = False
self.episode_reward = 0.0
self.actions = []
self.episode_rewards = []
self.info = {}
self.was_reset = True # Making sure PTL is doing its job.
def train_dataloader(self) -> DataLoader:
"""
        This custom dataloader forces the model to run one step at a time
        (batching doesn't make sense here); it's just a fancy iterator.
"""
return DataLoader(
dataset=ShippingFacilityEpisodesDataset(
num_steps=self.env.num_steps,
orders_per_day=self.env.order_generator.orders_per_day,
),
batch_size=1,
shuffle=False,
)
def _wandb_custom_metrics(self, info):
wandb_metrics = report_generator.convert_info_into_metrics_summary_dict(info)
logging.info(
f"Episode {self.episode_counter} had {wandb_metrics['big_m_count']} BigMs"
)
logging.info("Finished episode with greedy runner, logging metrics to wandb:")
logging.info(wandb_metrics)
wandb.log(
wandb_metrics,
commit=False,
)
def configure_optimizers(self):
# return [
# Adam([torch.ones(2, 2, requires_grad=True)])
# ] # shouldn't use it at all.
return Adam([torch.ones(2, 2, requires_grad=True)])
def backward(self, trainer, loss: Tensor, optimizer: Optimizer) -> None:
return
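# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Materialises the (step, num_order, ep_start) triples that training_step
# above consumes, batched exactly as the model receives them; the tiny
# num_steps/orders_per_day values are arbitrary choices for the example.
def _example_step_tuples():
    ds = ShippingFacilityEpisodesDataset(num_steps=3, orders_per_day=2)
    return list(DataLoader(ds, batch_size=1, shuffle=False))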
def main():
config_dict = {
"env": {
"num_dcs": 3,
"num_customers": 5,
"num_commodities": 3,
"orders_per_day": 2,
"dcs_per_customer": 2,
"demand_mean": 500,
"demand_var": 150,
"num_steps": 10, # steps per episode
"big_m_factor": 10000, # how many times the customer cost is the big m.
},
"hps": {
"env": "shipping-v0", # openai env ID.
"episode_length": 30, # todo isn't this an env thing?
"max_episodes": 5, # to do is this num episodes, is it being used?
"batch_size": 30,
"sync_rate": 2, # Rate to sync the target and learning network.
},
"seed": 0,
"agent": "best_fit"
# "agent": "random_valid"
}
torch.manual_seed(config_dict["seed"])
np.random.seed(config_dict["seed"])
random.seed(config_dict["seed"]) # not sure if actually used
np.random.seed(config_dict["seed"])
    run = wandb.init(  # TODO: debug why wrong project and experiment
config=config_dict,
project="rl_warehouse_assignment",
name="best_fit_few_warehouses_debugreward",
)
config = wandb.config
environment_config = config.env
hparams = config.hps
experiment_name = f"gr_{config.agent}_few_warehouses_debugreward"
wandb_logger = WandbLogger(
project="rl_warehouse_assignment",
name=experiment_name,
tags=[
# "debug"
# "experiment"
"local_debug"
],
log_model=False,
)
wandb_logger.log_hyperparams(dict(config))
environment_parameters = network_flow_env_builder.build_network_flow_env_parameters(
environment_config, hparams["episode_length"], order_gen="biased"
)
env = ShippingFacilityEnvironment(environment_parameters)
agent = agents.get_agent(env, environment_config, hparams, config.agent)
model = GreedyAgentRLModel(agent, env, experiment_name=experiment_name)
trainer = pl.Trainer(
max_epochs=hparams["max_episodes"],
# early_stop_callback=False,
val_check_interval=100,
logger=wandb_logger,
# log_save_interval=1,
# row_log_interval=1, # the default of this may leave info behind.
callbacks=[
MyPrintingCallback(),
ShippingFacilityEnvironmentStorageCallback(
experiment_name,
base="data/results/",
experiment_uploader=WandbDataUploader(),
),
],
)
trainer.fit(model)
if __name__ == "__main__":
# logging.root.level = logging.INFO
logging.root.level = logging.DEBUG
main()
| 30.044674
| 99
| 0.617065
|
757ce33e806d1c8c2a0b6fa8e711693877c230f0
| 2,284
|
py
|
Python
|
calaccess_processed_filings/models/campaign/form497/base.py
|
ryanvmenezes/django-calaccess-processed-data
|
966635c8438cda440a12f7765af7c79b5bcb3995
|
[
"MIT"
] | null | null | null |
calaccess_processed_filings/models/campaign/form497/base.py
|
ryanvmenezes/django-calaccess-processed-data
|
966635c8438cda440a12f7765af7c79b5bcb3995
|
[
"MIT"
] | null | null | null |
calaccess_processed_filings/models/campaign/form497/base.py
|
ryanvmenezes/django-calaccess-processed-data
|
966635c8438cda440a12f7765af7c79b5bcb3995
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for storing data from Schedule 497, the Late Contribution Reports.
More about the filing: http://calaccess.californiacivicdata.org/documentation/calaccess-forms/f497/
"""
from __future__ import unicode_literals
from django.db import models
from calaccess_processed_filings.models.base import FilingBaseModel
class Form497ItemBase(FilingBaseModel):
"""
Abstract base model for items reported on Schedule 497 filings.
On Schedule 497, campaign filers are required to report late contributions
received or made in the 90 days leading up to an election.
"""
line_item = models.IntegerField(
verbose_name='line item',
db_index=True,
null=False,
help_text='Line number of the filing form where the transaction is '
'itemized (from S497_CD.LINE_ITEM)',
)
date_received = models.DateField(
verbose_name='date received',
db_index=True,
null=True,
help_text='Date the late contribution was received (from S497_CD.'
'CTRIB_DATE, unless NULL then from S497_CD.DATE_THRU)'
)
date_received_thru = models.DateField(
verbose_name='date received thru',
null=True,
help_text='End date for late contributions received over a range of '
                    'days (from S497_CD.DATE_THRU)',
)
amount_received = models.DecimalField(
verbose_name='amount received',
decimal_places=2,
max_digits=16,
help_text='Dollar amount received (from S497_CD.AMOUNT)',
)
transaction_id = models.CharField(
verbose_name='transaction id',
max_length=20,
db_index=True,
help_text='Identifies a unique transaction across versions of the a '
'given Schedule 497 filing (from S497_CD.TRAN_ID)'
)
memo_reference_number = models.CharField(
verbose_name='memo reference number',
max_length=20,
blank=True,
help_text='Reference number for the memo attached to the transaction '
'(from S497_CD.MEMO_REFNO)',
)
class Meta:
"""
Model options.
"""
app_label = 'calaccess_processed_filings'
abstract = True
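# --- Illustrative sketch (hypothetical, not part of the original module) ---
# A made-up concrete subclass showing how the abstract base above would
# typically be extended; the extra filing_id field is an assumption.
class ExampleForm497Item(Form497ItemBase):
    """
    Hypothetical concrete item model built on Form497ItemBase.
    """
    filing_id = models.IntegerField(
        verbose_name='filing id',
        db_index=True,
        help_text='Hypothetical link back to the parent Schedule 497 filing.',
    )

    class Meta(Form497ItemBase.Meta):
        abstract = False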
| 34.089552
| 99
| 0.657618
|
550208aeff678cbf25e19f2f274e7aed2d511d63
| 20,200
|
py
|
Python
|
coilfm/vae.py
|
anon-coil/coil_gecco
|
6b8aa410a944e1db26c3acdc77af71b3b5d4fe74
|
[
"MIT"
] | 2
|
2022-02-15T08:39:26.000Z
|
2022-02-17T11:51:06.000Z
|
coilfm/vae.py
|
anon-coil/coil_gecco
|
6b8aa410a944e1db26c3acdc77af71b3b5d4fe74
|
[
"MIT"
] | null | null | null |
coilfm/vae.py
|
anon-coil/coil_gecco
|
6b8aa410a944e1db26c3acdc77af71b3b5d4fe74
|
[
"MIT"
] | null | null | null |
from enum import Enum
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
from torch.utils.data import DataLoader, TensorDataset, Subset
PI = torch.from_numpy(np.asarray(np.pi))
def create_dataloaders(pts, n_train=0.85, n_val=0.05, num_workers=8,
                       batch_size=64):
    N_train = int(n_train * len(pts))
    N_val = int(n_val * len(pts))
N_test = len(pts) - N_train - N_val
ds = TensorDataset(torch.from_numpy(pts).float())
rand_indeces = np.random.choice(len(pts), len(pts), replace=False)
train_inds, val_inds, test_inds = np.split(
rand_indeces, [N_train, N_train + N_val])
train_ds = Subset(ds, train_inds)
val_ds = Subset(ds, val_inds)
test_ds = Subset(ds, test_inds)
train_dl = DataLoader(
train_ds, batch_size=batch_size, num_workers=num_workers, shuffle=True,
drop_last=True)
val_dl = DataLoader(
val_ds, batch_size=batch_size, num_workers=num_workers, shuffle=False,
drop_last=False)
test_dl = DataLoader(
test_ds, batch_size=batch_size, num_workers=num_workers, shuffle=False,
drop_last=False)
return train_dl, val_dl, test_dl
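# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Builds the three loaders from random 2-d points; the sample count and
# batch_size are arbitrary choices for the example.
def _example_dataloaders():
    pts = np.random.randn(1000, 2)
    return create_dataloaders(pts, num_workers=0, batch_size=32)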
# -- Prior ---------------------------------------------------------------- -- #
class GaussianMixture(object):
def __init__(self, mean, std, weights):
"""
Args:
mean: Tensor of shape (N, K, d1, ..., dM). Means of the mixtures.
std: Tensor of shape (N, K, d1, ..., dM). Standard deviation of
mixtures. Must be same shape as mean.
weights: Tensor of shape (N, K) or (1, K). Weights of mixtures.
"""
shape_err_msg = "Mean and std do not have the same shape."
assert mean.shape == std.shape, shape_err_msg
weights_dim_err_msg = ("Expected number of weight dimensions to be 2, "
"instead got {}".format(weights.dim()))
assert weights.dim() == 2, weights_dim_err_msg
shape_err_msg_2 = ("Expected 1st dimension of mean/std to be the same "
"as the one of weights.")
assert mean.shape[1] == weights.shape[1], shape_err_msg_2
self.mean = mean
self.std = std
self.K = mean.shape[1]
self.weights = weights.view(-1, self.K)
self.normal = Normal(mean, std)
def log_prob(self, input):
"""
Args:
x: Tensor of shape (N, {1, K}, d1, ..., dM) or
(L, N, {1, K}, d1, ..., dM).
Returns
logp: Tensor of shape (N, {1, K}, 1) or (L, N, {1, K}, 1) similar
to input shape.
"""
if len(input.shape) == len(self.mean.shape):
if self.mean.shape[0] > 1:
assert input.shape[0] == self.mean.shape[0], \
"Input dimension 0 is not the same as mean/std"
assert input.shape[2:] == self.mean.shape[2:], \
"Shape error: input.shape[2:] != self.mean.shape[2:]"
weights = self.weights
elif len(input.shape) == len(self.mean.shape) + 1:
weights = self.weights.unsqueeze(0)
else:
raise TypeError("Input shape is not compatible")
log_wx = self.normal.log_prob(input).sum(-1) + torch.log(weights)
logp = torch.logsumexp(log_wx, -1, keepdim=True)
return logp
def sample(self, sample_shape=[1]):
z_sampler = Normal(self.mean, self.std)
y_sampler = Categorical(self.weights)
y = y_sampler.sample(sample_shape=sample_shape)
z = z_sampler.sample(sample_shape=sample_shape)
return z[torch.arange(z.shape[0]), 0, y.flatten().long(), :]
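# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Evaluates the mixture density of K=2 unit Gaussians at the origin; the
# shapes follow the docstrings above.
def _example_mixture_logprob():
    mean = torch.zeros(1, 2, 2)           # (N=1, K=2, d=2)
    std = torch.ones(1, 2, 2)
    weights = torch.full((1, 2), 0.5)     # equal mixture weights
    gm = GaussianMixture(mean, std, weights)
    x = torch.zeros(1, 1, 2)              # one point, broadcast over K
    return gm.log_prob(x)                 # tensor of shape (1, 1)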
class Reshape(nn.Module):
def __init__(self, shape):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, input):
return input.view(-1, *self.shape)
class MixturePrior(nn.Module):
def __init__(self, n_mixture, num_inputs, num_latent, device="cuda:0"):
super(MixturePrior, self).__init__()
self.n_mixture = n_mixture
self.mixture_weights = torch.ones(1, self.n_mixture) / self.n_mixture
self.mixture_weights = self.mixture_weights.to(device)
# n_mixture x n_mixture
self.idle_input = torch.eye(n_mixture, n_mixture, requires_grad=False)
self.idle_input = self.idle_input.to(device)
self.idle_encoder = nn.Linear(n_mixture, num_inputs)
self.encoder = nn.Sequential(
nn.Linear(num_inputs, 256),
nn.ReLU(True),
nn.Linear(256, 256),
nn.ReLU(True))
self.z_mean = nn.Sequential(
nn.Linear(256, num_latent),
Reshape([n_mixture, num_latent]))
self.z_logvar = nn.Sequential(
nn.Linear(256, num_latent),
Reshape([n_mixture, num_latent]),
nn.Hardtanh(min_val=-6., max_val=0.))
def forward(self):
# n_mixture, num_inputs
h1 = self.idle_encoder(self.idle_input)
h2 = self.encoder(h1)
z_mean = self.z_mean(h2)
z_logvar = self.z_logvar(h2)
mix_dist = GaussianMixture(
z_mean,
torch.exp(0.5 * z_logvar),
self.mixture_weights
)
return mix_dist
class FixedPrior(nn.Module):
def __init__(self, device="cuda:0", max_val=2., std=0.6):
super(FixedPrior, self).__init__()
self.means = torch.Tensor([
[0., 0.],
[0., -max_val],
[0., max_val],
[max_val, 0.],
[-max_val, 0.],
[-max_val, -max_val],
[max_val, max_val],
[-max_val, max_val],
[max_val, -max_val]
]).unsqueeze(0).to(device)
self.std = torch.Tensor(
            [std]).view(1, 1).repeat(9, 2).unsqueeze(0).to(device)
self.mixture_weights = (torch.ones(1, 9) / 9).to(device)
def forward(self):
mix_dist = GaussianMixture(
self.means,
self.std,
self.mixture_weights
)
return mix_dist
def log_standard_normal(x, reduction=None, dim=None):
log_p = -0.5 * torch.log(2. * PI) - 0.5 * x**2.
if reduction == 'avg':
return torch.mean(log_p, dim)
elif reduction == 'sum':
return torch.sum(log_p, dim)
else:
return log_p
class FlowPrior(nn.Module):
def __init__(self, num_latent=2, num_flows=3, num_outputs=2,
num_hidden=256, num_hidden_layers=2):
super(FlowPrior, self).__init__()
# scale (s) network
def nets():
layers = [
nn.Linear(num_latent // 2, num_hidden),
nn.LeakyReLU()
]
for _ in range(num_hidden_layers):
layers.append(nn.Linear(num_hidden, num_hidden))
layers.append(nn.LeakyReLU())
layers.append(nn.Linear(num_hidden, num_latent // 2))
layers.append(nn.Tanh())
return nn.Sequential(*layers)
# translation (t) network
def nett():
layers = [
nn.Linear(num_latent // 2, num_hidden),
nn.LeakyReLU()
]
for _ in range(num_hidden_layers):
layers.append(nn.Linear(num_hidden, num_hidden))
layers.append(nn.LeakyReLU())
layers.append(nn.Linear(num_hidden, num_latent // 2))
return nn.Sequential(*layers)
self.num_outputs = num_outputs
self.t = torch.nn.ModuleList([nett() for _ in range(num_flows)])
self.s = torch.nn.ModuleList([nets() for _ in range(num_flows)])
self.num_flows = num_flows
def coupling(self, x, index, forward=True):
(xa, xb) = torch.chunk(x, 2, 1)
s = self.s[index](xa)
t = self.t[index](xa)
if forward:
# yb = f^{-1}(x)
yb = (xb - t) * torch.exp(-s)
else:
# xb = f(y)
yb = torch.exp(s) * xb + t
return torch.cat((xa, yb), 1), s
def permute(self, x):
return x.flip(1)
def f(self, x):
log_det_J, z = x.new_zeros(x.shape[0]), x
for i in range(self.num_flows):
z, s = self.coupling(z, i, forward=True)
z = self.permute(z)
log_det_J = log_det_J - s.sum(dim=1)
return z, log_det_J
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x, _ = self.coupling(x, i, forward=False)
return x
def sample(self, batch_size, z=None):
if z is None:
z = torch.randn(batch_size, self.num_outputs)
x = self.f_inv(z)
return x.view(-1, self.num_outputs)
def log_prob(self, x):
z, log_det_J = self.f(x)
log_p = (log_standard_normal(z) + log_det_J.unsqueeze(1))
return log_p
# -- Architecture --------------------------------------------------------- -- #
class EncoderMLP(nn.Module):
def __init__(self, num_inputs=2, num_hidden=300, num_latent=2,
num_layers=1):
super(EncoderMLP, self).__init__()
layers = [
nn.Linear(num_inputs, num_hidden),
nn.ReLU(inplace=True)
]
for _ in range(num_layers):
layers.append(nn.Linear(num_hidden, num_hidden))
layers.append(nn.ReLU(inplace=True))
self.encode = nn.Sequential(*layers)
self.mean = nn.Linear(num_hidden, num_latent)
self.logvar = nn.Sequential(
nn.Linear(num_hidden, num_latent),
nn.Hardtanh(min_val=-6, max_val=1.))
def forward(self, input):
h = self.encode(input)
mean = self.mean(h)
logvar = self.logvar(h)
return mean, logvar
class DecoderMLP(nn.Module):
def __init__(self, num_inputs=2, num_latent=2, num_hidden=300,
lik="gaussian", num_layers=1):
super(DecoderMLP, self).__init__()
layers = [
nn.Linear(num_latent, num_hidden),
nn.ReLU(inplace=True)
]
for _ in range(num_layers):
layers.append(nn.Linear(num_hidden, num_hidden))
layers.append(nn.ReLU(inplace=True))
self.decode = nn.Sequential(*layers)
if lik == "gaussian":
self.x_mean = nn.Sequential(
nn.Linear(num_hidden, num_inputs)
)
self.x_logvar = nn.Sequential(
nn.Linear(num_hidden, num_inputs),
nn.Hardtanh(min_val=-6, max_val=-2)
)
elif lik == "mse":
self.x = nn.Linear(num_hidden, num_inputs)
self.lik = lik
def forward(self, input):
h = self.decode(input)
if self.lik == "mse":
rec = self.x(h)
elif self.lik == "gaussian":
x_mean = self.x_mean(h)
x_logvar = self.x_logvar(h)
rec = torch.cat((x_mean, x_logvar), -1)
return rec
def log_normal_diag(x, mu, log_var, reduction=None, dim=None):
PI = torch.from_numpy(np.asarray(np.pi))
log_p = -0.5 * torch.log(
2. * PI) - 0.5 * log_var - 0.5 * torch.exp(-log_var) * (x - mu)**2.
if reduction == 'avg':
return torch.mean(log_p, dim)
elif reduction == 'sum':
return torch.sum(log_p, dim)
else:
return log_p
class VAE(nn.Module):
def __init__(self, num_inputs=2, num_latent=2, num_hidden=256, lik="mse",
prior="gaussian", num_mixture=9, num_flows=3,
num_flow_layers=2, num_autoencoding_layers=1):
super(VAE, self).__init__()
self.encoder = EncoderMLP(
num_inputs=num_inputs, num_latent=num_latent,
num_hidden=num_hidden, num_layers=num_autoencoding_layers)
self.decoder = DecoderMLP(
num_inputs=num_inputs, num_latent=num_latent,
num_hidden=num_hidden, lik=lik, num_layers=num_autoencoding_layers)
self.lik = lik
self.prior = prior
if self.prior == "mixture":
self.pz = MixturePrior(num_mixture, num_inputs, num_latent)
elif self.prior == "fixedmixture":
self.pz = FixedPrior()
elif self.prior == "flow":
self.pz = FlowPrior(num_outputs=num_latent,
num_latent=num_latent,
num_hidden=num_hidden, num_flows=num_flows,
num_hidden_layers=num_flow_layers)
def forward(self, input):
mean, logvar = self.encoder(input)
z = self.reparameterize(mean, logvar)
rec = self.decoder(z)
return rec, mean, logvar, z
def loss(self, x, rec, mu, logvar, z, kl_weight=1.0):
if self.lik == "mse":
lik = F.mse_loss(rec, x, reduction='sum') / x.shape[0]
elif self.lik == "gaussian":
latent_dim = rec.shape[1] // 2
mu_x, logvar_x = torch.split(rec, [latent_dim, latent_dim], dim=1)
lik = - log_normal_diag(x, mu_x, logvar_x).sum() / x.shape[0]
else:
raise NotImplementedError
if self.prior == "gaussian":
KL = -0.5 * torch.sum(
1 + logvar - mu.pow(2) - logvar.exp()) / mu.shape[0]
elif self.prior in ["mixture", "fixedmixture"]:
qz = Normal(mu, torch.exp(0.5 * logvar))
log_pz = self.pz().log_prob(z.unsqueeze(1)).view(z.shape[0], 1)
log_qz = qz.log_prob(z).view(z.shape[0], -1)
KL = (log_qz.sum(-1, keepdim=True) - log_pz.sum(
-1, keepdim=True)).mean()
elif self.prior == "flow":
KL = (log_normal_diag(
z, mu, logvar) - self.pz.log_prob(z)).sum() / mu.shape[0]
else:
raise NotImplementedError
loss = lik + (kl_weight * KL)
stats = {
"loss": loss.detach().item(),
"kl": KL.detach().item(),
"lik": lik.detach().item()
}
return loss, stats
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def save(self, path):
torch.save(self.state_dict(), path)
def express(self, z):
        # Use the VAE as a generator: given a numpy latent vector, return the
        # full numpy phenotype.
if self.prior == "flow":
latent = torch.from_numpy(z).float()
latent = self.pz.sample(batch_size=latent.shape[0], z=latent)
else:
latent = torch.from_numpy(z).float()
pheno = self.decoder(latent)
return pheno.detach().numpy()
# -- Utils ---------------------------------------------------------------- -- #
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def display_summary(self):
entries = [" *"]
entries += [meter.summary() for meter in self.meters]
print(' '.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
class Summary(Enum):
NONE = 0
AVERAGE = 1
SUM = 2
COUNT = 3
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
self.name = name
self.fmt = fmt
self.summary_type = summary_type
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def summary(self):
fmtstr = ''
if self.summary_type is Summary.NONE:
fmtstr = ''
elif self.summary_type is Summary.AVERAGE:
fmtstr = '{name} {avg:.3f}'
elif self.summary_type is Summary.SUM:
fmtstr = '{name} {sum:.3f}'
elif self.summary_type is Summary.COUNT:
fmtstr = '{name} {count:.3f}'
else:
raise ValueError('invalid summary type %r' % self.summary_type)
return fmtstr.format(**self.__dict__)
# -- Training ------------------------------------------------------------- -- #
def train_vae(model, train_dl, val_dl,
batch_size=100, lr=5e-4, epochs=100, beta=1.,
print_freq=100, notebook_display=False):
optimizer = optim.Adam(model.parameters(), lr=lr)
for epoch in range(epochs):
losses = AverageMeter('Loss', ':.4f')
kls = AverageMeter('KL', ':.4f')
liks = AverageMeter('Lik', ':.4f')
progress = ProgressMeter(
len(train_dl),
[losses, kls, liks],
prefix="Epoch: [{}]".format(epoch))
train_samples = []
train_rec = []
for i, feats in enumerate(train_dl):
feats = feats[0].to("cuda:0")
rec, mu, logvar, z = model(feats)
loss, stats = model.loss(
feats, rec, mu, logvar, z, kl_weight=min(epoch / 10, beta))
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.update(stats["loss"], feats.shape[0])
kls.update(stats["kl"], feats.shape[0])
liks.update(stats["lik"], feats.shape[0])
if i % print_freq == 0:
progress.display(i)
train_samples.append(feats.cpu().numpy())
train_rec.append(rec.detach().cpu().numpy())
train_samples = np.concatenate(train_samples, 0)
train_rec = np.concatenate(train_rec, 0)
progress.display_summary()
val_samples = []
val_rec = []
with torch.no_grad():
for i, feats in enumerate(val_dl):
feats = feats[0].to("cuda:0")
rec, _, _, _ = model(feats)
val_samples.append(feats.cpu().numpy())
val_rec.append(rec.detach().cpu().numpy())
val_samples = np.concatenate(val_samples, 0)
val_rec = np.concatenate(val_rec, 0)
if notebook_display:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.scatter(
train_samples[::100, 0], train_samples[::100, 1], c="r", label="data")
ax1.scatter(train_rec[::100, 0],
train_rec[::100, 1], c="b", label="rec")
ax2.scatter(val_samples[::100, 0], val_samples[::100, 1], c="r",
label="data")
ax2.scatter(val_rec[::100, 0],
val_rec[::100, 1], c="b", label="rec")
plt.show()
def sample_2d_pts(res=15):
x = np.linspace(-3, 3, res)
y = np.linspace(-3, 3, res)
    X, Y = np.meshgrid(x, y)  # grid of points
pts = np.c_[X.flatten(), Y.flatten()]
return pts
if __name__ == "__main__":
# ---- Code Testing ---- #
# - Load Data
from dejong import create_dataset
dataset = create_dataset()
train_dl, val_dl, test_dl = create_dataloaders(dataset)
# - Setup and Train VAE
model_file = 'dejong_test.pt'
model = VAE(prior="fixedmixture").to("cuda:0") # use constructor defaults
# train or load
# train_vae(model, train_dl, val_dl)
# torch.save(model.state_dict(), model_file)
model.load_state_dict(torch.load(model_file))
model.to('cpu')
# - 'Express' from genotype
pts = sample_2d_pts(15)
model.express(pts)
print("Done")
| 34.353741
| 86
| 0.552327
|
0ca0ce30ef51e5d9290b30218bab5958c341223b
| 366
|
py
|
Python
|
pyft/__init__.py
|
coninggu/pyft
|
340c7c104f3da447efaf00c1e45903cf4a672b7f
|
[
"MIT"
] | null | null | null |
pyft/__init__.py
|
coninggu/pyft
|
340c7c104f3da447efaf00c1e45903cf4a672b7f
|
[
"MIT"
] | null | null | null |
pyft/__init__.py
|
coninggu/pyft
|
340c7c104f3da447efaf00c1e45903cf4a672b7f
|
[
"MIT"
] | null | null | null |
from pkg_resources import get_distribution
pkg = get_distribution('pyft')
pkg_meta = {}
for meta in pkg._get_metadata(pkg.PKG_INFO):
try:
k, v = meta.split(': ', 1)
pkg_meta[k] = v
except ValueError:
continue
__version__ = get_distribution('pyft').version
__author_email__ = pkg_meta['Author-email']
__url__ = pkg_meta['Home-page']
| 24.4
| 46
| 0.685792
|
48456492cc3abec1f2a8f867877781dd1db2a4b3
| 2,890
|
py
|
Python
|
tests/manual/service_endpoint_test.py
|
joergeschmann/counselor
|
455ba2a07d25c5ac946f9b3dab0678eec7d46c40
|
[
"MIT"
] | null | null | null |
tests/manual/service_endpoint_test.py
|
joergeschmann/counselor
|
455ba2a07d25c5ac946f9b3dab0678eec7d46c40
|
[
"MIT"
] | null | null | null |
tests/manual/service_endpoint_test.py
|
joergeschmann/counselor
|
455ba2a07d25c5ac946f9b3dab0678eec7d46c40
|
[
"MIT"
] | null | null | null |
import logging
import unittest
from counselor.endpoint.entity import ServiceDefinition
from counselor.endpoint.http_endpoint import EndpointConfig
from counselor.endpoint.service_endpoint import ServiceEndpoint
from counselor.filter import Filter, Operators
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
class ServiceTests(unittest.TestCase):
def setUp(self):
LOGGER.info("Setting up")
self.test_service_key = "unit-test-service"
self.consul_config = EndpointConfig(host="127.0.0.1", port=8500, version="v1")
self.service_endpoint = ServiceEndpoint(self.consul_config)
def tearDown(self):
LOGGER.info("Cleaning up")
self.service_endpoint.deregister(self.test_service_key)
def test_services_registration(self):
service_definition = ServiceDefinition(
key=self.test_service_key,
address="127.0.0.1",
port=61123,
tags=["unit", "test", "v1"],
meta={
"version": "1.0",
"status": "active",
"base_time": "1573639530",
}
)
register_status = self.service_endpoint.register(service_definition)
self.assertTrue(register_status.successful)
get_status, found_service_definition = self.service_endpoint.get_details(service_definition.key)
self.assertTrue(get_status.successful, get_status.as_string())
self.assertEqual(service_definition.key, found_service_definition.key)
self.assertEqual(service_definition.port, found_service_definition.port)
self.assertEqual(service_definition.meta["base_time"], found_service_definition.meta["base_time"])
service_definition.meta["version"] = "v1.1"
update_status = self.service_endpoint.update(service_definition)
self.assertTrue(update_status.successful, update_status.as_string())
get_status, found_service_definition = self.service_endpoint.get_details(service_definition.key)
self.assertTrue(get_status.successful, get_status.as_string())
self.assertEqual(service_definition.meta["version"], found_service_definition.meta["version"])
filter_expression = Filter.new_meta_filter("status", Operators.OPERATOR_EQUALITY, "active").as_expression()
query_tuple = ('filter', filter_expression)
filter_tuples = [query_tuple]
search_status, found_services = self.service_endpoint.search(filter_tuples)
self.assertTrue(search_status.successful, search_status.as_string())
self.assertEqual(service_definition.meta["version"], found_services[0].meta["version"])
deregister_status = self.service_endpoint.deregister(service_definition.key)
self.assertTrue(deregister_status.successful, deregister_status.as_string())
if __name__ == '__main__':
unittest.main()
| 42.5
| 115
| 0.720069
|
d529677adf1900df1c7e753c50094f17fab7b024
| 9,045
|
py
|
Python
|
Project1/lvx_parser/_frame.py
|
GCaptainNemo/3D-reconstruction-PCL
|
5ec608e21a2762d1b4c8202b50fa8f2ccfc8315b
|
[
"MIT"
] | 9
|
2021-05-02T18:27:39.000Z
|
2022-03-11T08:36:27.000Z
|
Project1/lvx_parser/_frame.py
|
GCaptainNemo/3D-reconstruction-PCL
|
5ec608e21a2762d1b4c8202b50fa8f2ccfc8315b
|
[
"MIT"
] | 1
|
2021-05-25T07:31:01.000Z
|
2021-05-25T07:31:01.000Z
|
Project1/lvx_parser/_frame.py
|
GCaptainNemo/3D-reconstruction-PCL
|
5ec608e21a2762d1b4c8202b50fa8f2ccfc8315b
|
[
"MIT"
] | 3
|
2022-01-17T13:35:47.000Z
|
2022-03-13T10:00:42.000Z
|
import struct
def _floatfrombytes(bs):
hs = ''.join(['%02X' % x for x in bs])
return float.fromhex(hs)
class DataType:
CARTESIAN_MID = 0
SPHERICAL_MID = 1
CARTESIAN_SINGLE = 2
SPHERAICAL_SINGLE = 3
CARTESIAN_DOUBLE = 4
SPHERAICAL_DOUBLE = 5
IMU_INFO = 6
class Point0:
def __init__(self, bs):
self.bs = bs
@property
def x(self):
return int.from_bytes(self.bs[:4], 'little', signed=True) / 1000
@property
def y(self):
return int.from_bytes(self.bs[4:8], 'little', signed=True) / 1000
@property
def z(self):
return int.from_bytes(self.bs[8:12], 'little', signed=True) / 1000
@property
def reflectivity(self):
return int.from_bytes(self.bs[12:13], 'little')
class Point1:
def __init__(self, bs):
self.bs = bs
@property
def depth(self):
return int.from_bytes(self.bs[:4], 'little', signed=True) / 1000
@property
def theta(self):
return int.from_bytes(self.bs[4:6], 'little')
@property
def phi(self):
return int.from_bytes(self.bs[6:8], 'little')
@property
def reflectivity(self):
return int.from_bytes(self.bs[8:9], 'little')
class Point2:
def __init__(self, bs):
self.bs = bs
@property
def x(self):
return int.from_bytes(self.bs[:4], 'little', signed=True) / 1000
@property
def y(self):
return int.from_bytes(self.bs[4:8], 'little', signed=True) / 1000
@property
def z(self):
return int.from_bytes(self.bs[8:12], 'little', signed=True) / 1000
@property
def reflectivity(self):
return int.from_bytes(self.bs[12:13], 'little')
@property
def tag(self):
return int.from_bytes(self.bs[13:14], 'little')
class Point3:
def __init__(self, bs):
self.bs = bs
@property
def depth(self):
return int.from_bytes(self.bs[:4], 'little', signed=True) / 1000
@property
def theta(self):
return int.from_bytes(self.bs[4:6], 'little')
@property
def phi(self):
return int.from_bytes(self.bs[6:8], 'little')
@property
def reflectivity(self):
return int.from_bytes(self.bs[8:9], 'little')
@property
def tag(self):
return int.from_bytes(self.bs[9:10], 'little')
class Point4:
def __init__(self, bs):
self.bs = bs
@property
def x1(self):
return int.from_bytes(self.bs[:4], 'little', signed=True) / 1000
@property
def y1(self):
        return int.from_bytes(self.bs[4:8], 'little', signed=True) / 1000
    @property
    def z1(self):
        return int.from_bytes(self.bs[8:12], 'little', signed=True) / 1000
    @property
    def reflectivity1(self):
        return int.from_bytes(self.bs[12:13], 'little')
    @property
    def tag1(self):
        return int.from_bytes(self.bs[13:14], 'little')
    @property
    def x2(self):
        return int.from_bytes(self.bs[14:18], 'little', signed=True) / 1000
    @property
    def y2(self):
        return int.from_bytes(self.bs[18:22], 'little', signed=True) / 1000
    @property
    def z2(self):
        return int.from_bytes(self.bs[22:26], 'little', signed=True) / 1000
    @property
    def reflectivity2(self):
        return int.from_bytes(self.bs[26:27], 'little')
    @property
    def tag2(self):
        return int.from_bytes(self.bs[27:28], 'little')
class Point5:
def __init__(self, bs):
self.bs = bs
@property
def theta(self):
return int.from_bytes(self.bs[:2], 'little')
@property
def phi(self):
return int.from_bytes(self.bs[2:4], 'little')
@property
def depth1(self):
return int.from_bytes(self.bs[4:8], 'little', signed=True) / 1000
@property
def reflectivity1(self):
return int.from_bytes(self.bs[8:9], 'little')
@property
def tag1(self):
return int.from_bytes(self.bs[9:10], 'little')
@property
def depth2(self):
return int.from_bytes(self.bs[10:14], 'little', signed=True) / 1000
@property
def reflectivity2(self):
return int.from_bytes(self.bs[14:15], 'little')
@property
def tag2(self):
return int.from_bytes(self.bs[15:16], 'little')
class Point6:
def __init__(self, bs):
self.bs = bs
self.gyro_x, self.gyro_y, self.gyro_z, \
self.acc_x, self.acc_y, self.acc_z = \
struct.unpack("<ffffff", self.bs)
#
# @property
# def gyro_x(self):
# return _floatfrombytes(self.bs[:4])
#
# @property
# def gyro_y(self):
# return _floatfrombytes(self.bs[4:8])
#
# @property
# def gyro_z(self):
# return _floatfrombytes(self.bs[8:12])
#
# @property
# def acc_x(self):
# return _floatfrombytes(self.bs[12:16])
#
# @property
# def acc_y(self):
# return _floatfrombytes(self.bs[16:20])
#
# @property
# def acc_z(self):
# return _floatfrombytes(self.bs[20:24])
class Package:
def __init__(self, bs):
self.bs = bs
@property
def device_index(self):
return int.from_bytes(self.bs[:1], 'little')
@property
def version(self):
return int.from_bytes(self.bs[1:2], 'little')
@property
def slot_id(self):
return int.from_bytes(self.bs[2:3], 'little')
@property
def lidar_id(self):
return int.from_bytes(self.bs[3:4], 'little')
@property
def reserved(self):
return int.from_bytes(self.bs[4:5], 'little')
@property
def status_code(self):
return int.from_bytes(self.bs[5:9], 'little')
@property
def timestamp_type(self):
return int.from_bytes(self.bs[9:10], 'little')
@property
def data_type(self):
return int.from_bytes(self.bs[10:11], 'little')
@property
def timestamp(self):
return int.from_bytes(self.bs[11:19], 'little')
@property
def points(self):
if self.data_type == DataType.CARTESIAN_MID:
point_size = 13
point_count = 100
point_class = Point0
elif self.data_type == DataType.SPHERICAL_MID:
point_size = 9
point_count = 100
point_class = Point1
elif self.data_type == DataType.CARTESIAN_SINGLE:
point_size = 14
point_count = 96
point_class = Point2
elif self.data_type == DataType.SPHERAICAL_SINGLE:
point_size = 10
point_count = 96
point_class = Point3
elif self.data_type == DataType.CARTESIAN_DOUBLE:
point_size = 28
point_count = 48
point_class = Point4
elif self.data_type == DataType.SPHERAICAL_DOUBLE:
point_size = 16
point_count = 48
point_class = Point5
elif self.data_type == DataType.IMU_INFO:
point_size = 24
point_count = 1
point_class = Point6
else:
            raise Exception(self.data_type)
return [point_class(self.bs[19 + i * point_size: 19 + point_size * (i + 1)]) for i in range(point_count)]
class FrameHeader:
def __init__(self, bs):
self.bs = bs
@property
def current_offset(self):
return int.from_bytes(self.bs[:8], 'little')
@property
def next_offset(self):
return int.from_bytes(self.bs[8:16], 'little')
@property
def frame_index(self):
return int.from_bytes(self.bs[16:24], 'little')
class Frame:
def __init__(self, bs):
self.bs = bs
@property
def frame_header(self):
return FrameHeader(self.bs[:24])
@property
def packages(self):
current_offset = 24
while current_offset < len(self.bs):
            package_header = Package(self.bs[current_offset:current_offset + 19])
            if package_header.data_type == DataType.CARTESIAN_MID:
                point_size = 13
                point_count = 100
            elif package_header.data_type == DataType.SPHERICAL_MID:
                point_size = 9
                point_count = 100
            elif package_header.data_type == DataType.CARTESIAN_SINGLE:
                point_size = 14
                point_count = 96
            elif package_header.data_type == DataType.SPHERAICAL_SINGLE:
                point_size = 10
                point_count = 96
            elif package_header.data_type == DataType.CARTESIAN_DOUBLE:
                point_size = 28
                point_count = 48
            elif package_header.data_type == DataType.SPHERAICAL_DOUBLE:
                point_size = 16
                point_count = 48
            elif package_header.data_type == DataType.IMU_INFO:
                point_size = 24
                point_count = 1
            else:
                raise Exception(package_header.data_type)
yield Package(self.bs[current_offset:current_offset + 19 + point_size * point_count])
current_offset += 19 + point_size * point_count
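# Hedged usage sketch (not part of the original module): walk the packages of
# one frame and pull coordinates out of Cartesian mid-rate points. How the raw
# frame bytes are obtained is left out; `frame_bytes` is a placeholder for one
# complete frame blob, and the helper name is illustrative only.
def iter_cartesian_points_example(frame_bytes):
    frame = Frame(frame_bytes)
    for package in frame.packages:
        if package.data_type == DataType.CARTESIAN_MID:
            for point in package.points:
                yield point.x, point.y, point.z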
| 25.550847
| 113
| 0.586844
|
2816bd185777117130189a5980be03c7654024a8
| 1,477
|
py
|
Python
|
build_mapping.py
|
blueset/tonguess-toolbox
|
852924d71efdf2bb7187efc017354eed829ba75e
|
[
"MIT"
] | null | null | null |
build_mapping.py
|
blueset/tonguess-toolbox
|
852924d71efdf2bb7187efc017354eed829ba75e
|
[
"MIT"
] | null | null | null |
build_mapping.py
|
blueset/tonguess-toolbox
|
852924d71efdf2bb7187efc017354eed829ba75e
|
[
"MIT"
] | null | null | null |
from itertools import product
from guess import guess
from typing import Dict, Set, Tuple, List
from multiprocessing import Pool
import pickle
from tqdm import tqdm
import sys
def calculate(i):
idx = i[0]
query = "".join(i[1])
    # Maps each guess outcome to a bitmask of matching word indices.
    outcomes: Dict[Tuple[int, int], int] = {}
for iidx, word in enumerate(words):
outcome: Tuple[int, int] = guess(query, word)
if outcome not in outcomes:
outcomes[outcome] = 0
outcomes[outcome] |= 1 << iidx
# mapping[query] = outcomes
# print(query, *[len(j) for j in outcomes.values()])
return (idx, outcomes)
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f"""
Build guess-outcome mappings.
Usage:
{sys.argv[0]} 3
{sys.argv[0]} 4
""")
exit()
word_len = int(sys.argv[1])
if word_len not in (3, 4):
print("word length must be 3 or 4.")
exit(1)
words: List[str] = [i.strip() for i in open(f"dict{word_len}")]
mapping = {}
with Pool(8) as pool:
prod = enumerate(product("ABCDEFGHIJKLMNOPQRSTUVWXYZ", repeat=word_len))
m = pool.imap_unordered(calculate, prod)
for k, v in tqdm(m, total=26**word_len):
mapping[k] = v
for k, v in enumerate(product("ABCDEFGHIJKLMNOPQRSTUVWXYZ", repeat=word_len)):
if len(set(v)) != len(v):
del mapping[k]
with open(f"mapping{word_len}", "wb") as f:
pickle.dump(mapping, f)
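# Hedged sketch (illustrative helper, not used by the script above): read the
# pickled mapping back. Each query index maps an outcome tuple from guess() to
# an integer bitmask whose set bits are word indices in dict<word_len>, so the
# matching candidate words can be recovered like this.
def load_candidates(word_len: int, query_index: int, outcome: Tuple[int, int]) -> List[str]:
    with open(f"mapping{word_len}", "rb") as f:
        mapping = pickle.load(f)
    dictionary = [w.strip() for w in open(f"dict{word_len}")]
    bitmask = mapping.get(query_index, {}).get(outcome, 0)
    return [w for i, w in enumerate(dictionary) if bitmask & (1 << i)]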
| 25.033898
| 82
| 0.590386
|
35d64237c074a1cb09dfcb2874487e7837249638
| 2,246
|
py
|
Python
|
source/codes/Arcpy_2_SpeciesPoly2Raster.py
|
awilkins/CSC18
|
7467011446f67efb679ab7b04b9196e6ed57202c
|
[
"MIT"
] | 78
|
2018-01-12T13:58:21.000Z
|
2022-03-12T10:32:39.000Z
|
source/codes/Arcpy_2_SpeciesPoly2Raster.py
|
mullenkamp/ecan_python_courses
|
917a8fffd27e11d99444bd0f7e5213c0f848f6fd
|
[
"MIT"
] | 10
|
2019-01-27T16:48:31.000Z
|
2020-06-13T20:15:58.000Z
|
source/codes/Arcpy_2_SpeciesPoly2Raster.py
|
mullenkamp/ecan_python_courses
|
917a8fffd27e11d99444bd0f7e5213c0f848f6fd
|
[
"MIT"
] | 63
|
2018-01-16T09:10:59.000Z
|
2022-02-05T06:06:06.000Z
|
# Import arcpy module so we can use ArcGIS geoprocessing tools
import arcpy
""" This script adds a field into the input shapefile and updates the value
of that field based (range: 1-5) and finally rasterizes the shapefile """
# 1. Get parameters from the toolbox using 'GetParametersAsText' method
#----------------------------------------------------------------------
# --> check ArcGIS help for info how to use methods
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//018v00000047000000
input_species_shp = arcpy.GetParameterAsText(0)
output_path = arcpy.GetParameterAsText(1)
attribute_name = arcpy.GetParameterAsText(2)
presence_value = arcpy.GetParameterAsText(3)
# 2. Add a new field into the input shapefile with 'AddField_management' method
#------------------------------------------------------------------------------
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//001700000047000000
arcpy.AddField_management(in_table=input_species_shp, field_name=attribute_name, field_type="SHORT") # Other possible parameters can be left as default
# 3. Update the presence value for our newly created attribute with 'CalculateField_management' method
#-----------------------------------------------------------------------------------------------------
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//00170000004m000000
arcpy.CalculateField_management(in_table=input_species_shp, field=attribute_name, expression=presence_value)
# 4. Convert polygon to raster using 'PolygonToRaster_conversion' method
#-----------------------------------------------------------------------
# Method info: http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#//001200000030000000
arcpy.PolygonToRaster_conversion(in_features=input_species_shp, value_field=attribute_name, out_rasterdataset=output_path)
# 5. Print info for the user that tool has finished successfully using 'AddMessage' method
#----------------------------------------------------------------------------------------
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//018v00000007000000
my_message = "Tool finished successfully! Rock on!"
arcpy.AddMessage(my_message)
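# Hedged sketch: the same three geoprocessing calls with hard-coded arguments,
# handy for testing outside an ArcGIS toolbox. All paths and the field name are
# placeholders, not values prescribed by this exercise.
def run_example():
    shp = r"C:\data\species.shp"
    out_raster = r"C:\data\species_presence.tif"
    arcpy.AddField_management(in_table=shp, field_name="PRESENCE", field_type="SHORT")
    arcpy.CalculateField_management(in_table=shp, field="PRESENCE", expression="1")
    arcpy.PolygonToRaster_conversion(in_features=shp, value_field="PRESENCE", out_rasterdataset=out_raster)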
| 51.045455
| 151
| 0.654497
|
06836d9b4b170bb6be6f0ef48fb9a43861cc8d21
| 391
|
py
|
Python
|
app/email.py
|
MaryamMuchai/Pitch-World
|
1747a3fa20aeaace397d188d656e4fbe74d70b62
|
[
"MIT"
] | null | null | null |
app/email.py
|
MaryamMuchai/Pitch-World
|
1747a3fa20aeaace397d188d656e4fbe74d70b62
|
[
"MIT"
] | null | null | null |
app/email.py
|
MaryamMuchai/Pitch-World
|
1747a3fa20aeaace397d188d656e4fbe74d70b62
|
[
"MIT"
] | 1
|
2021-08-19T06:07:23.000Z
|
2021-08-19T06:07:23.000Z
|
from flask_mail import Message
from flask import render_template
from . import mail
def mail_message(subject, template, to, **kwargs):
    sender_email = 'maryammuchai@gmail.com'
    email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + ".txt", **kwargs)
    email.html = render_template(template + ".html", **kwargs)
mail.send(email)
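# Hedged usage sketch (not called anywhere in this app): shows the expected
# call shape of mail_message. The template name and recipient are made up, and
# "email/welcome" would need matching .txt and .html templates to exist.
def send_welcome_email_example(user_email):
    mail_message("Welcome to Pitch World", "email/welcome", user_email, name="New user")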
| 35.545455
| 66
| 0.731458
|
998ccd9a7b79455391241a1106376167eb0156db
| 949
|
py
|
Python
|
meraki_sdk/models/dhcp_lease_time_enum.py
|
meraki/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 37
|
2019-04-24T14:01:33.000Z
|
2022-01-28T01:37:21.000Z
|
meraki_sdk/models/dhcp_lease_time_enum.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 10
|
2019-07-09T16:35:11.000Z
|
2021-12-07T03:47:53.000Z
|
meraki_sdk/models/dhcp_lease_time_enum.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 17
|
2019-04-30T23:53:21.000Z
|
2022-02-07T22:57:44.000Z
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class DhcpLeaseTimeEnum(object):
"""Implementation of the 'DhcpLeaseTime' enum.
The term of DHCP leases if the appliance is running a DHCP server on this
VLAN. One of: '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1
week'
Attributes:
        ENUM_30_MINUTES: TODO: type description here.
        ENUM_1_HOUR: TODO: type description here.
        ENUM_4_HOURS: TODO: type description here.
        ENUM_12_HOURS: TODO: type description here.
        ENUM_1_DAY: TODO: type description here.
        ENUM_1_WEEK: TODO: type description here.
"""
ENUM_30_MINUTES = '30 minutes'
ENUM_1_HOUR = '1 hour'
ENUM_4_HOURS = '4 hours'
ENUM_12_HOURS = '12 hours'
ENUM_1_DAY = '1 day'
ENUM_1_WEEK = '1 week'
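# Hedged usage sketch (helper name is illustrative, not part of the generated
# SDK): check a user-supplied lease-time string against the enum values before
# sending it to the API.
def is_valid_dhcp_lease_time(value):
    return value in (
        DhcpLeaseTimeEnum.ENUM_30_MINUTES,
        DhcpLeaseTimeEnum.ENUM_1_HOUR,
        DhcpLeaseTimeEnum.ENUM_4_HOURS,
        DhcpLeaseTimeEnum.ENUM_12_HOURS,
        DhcpLeaseTimeEnum.ENUM_1_DAY,
        DhcpLeaseTimeEnum.ENUM_1_WEEK,
    )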
| 24.333333
| 95
| 0.613277
|
588101522c1783a6be8136ac70b7c6d5138c6a67
| 4,498
|
py
|
Python
|
openurl.py
|
ectogigamau/dns_markdown
|
ed25dcdca390aa15521519dd1a1255f12ae0e67a
|
[
"BSD-2-Clause"
] | 2
|
2021-11-05T12:19:34.000Z
|
2021-12-18T07:30:18.000Z
|
openurl.py
|
ectogigamau/dns_markdown
|
ed25dcdca390aa15521519dd1a1255f12ae0e67a
|
[
"BSD-2-Clause"
] | null | null | null |
openurl.py
|
ectogigamau/dns_markdown
|
ed25dcdca390aa15521519dd1a1255f12ae0e67a
|
[
"BSD-2-Clause"
] | 2
|
2019-04-15T09:51:37.000Z
|
2021-12-24T11:32:34.000Z
|
import urllib2
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
class openurl(object):
firefox_profile = None
browser = None
def __init__(self):
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference("network.http.pipelining", True)
firefox_profile.set_preference("network.http.proxy.pipelining", True)
firefox_profile.set_preference("network.http.pipelining.maxrequests", 8)
firefox_profile.set_preference("content.notify.interval", 500000)
firefox_profile.set_preference("content.notify.ontimer", True)
firefox_profile.set_preference("content.switch.threshold", 250000)
firefox_profile.set_preference("browser.cache.memory.capacity", 65536) # Increase the cache capacity.
firefox_profile.set_preference("browser.startup.homepage", "about:blank")
firefox_profile.set_preference("reader.parse-on-load.enabled", False) # Disable reader, we won't need that.
firefox_profile.set_preference("browser.pocket.enabled", False) # Duck pocket too!
firefox_profile.set_preference("loop.enabled", False)
firefox_profile.set_preference("browser.chrome.toolbar_style", 1) # Text on Toolbar instead of icons
firefox_profile.set_preference("browser.display.show_image_placeholders", False) # Don't show thumbnails on not loaded images.
firefox_profile.set_preference("browser.display.use_document_colors", False) # Don't show document colors.
firefox_profile.set_preference("browser.display.use_document_fonts", 0) # Don't load document fonts.
firefox_profile.set_preference("browser.display.use_system_colors", True) # Use system colors.
firefox_profile.set_preference("browser.formfill.enable", False) # Autofill on forms disabled.
firefox_profile.set_preference("browser.helperApps.deleteTempFileOnExit", True) # Delete temprorary files.
firefox_profile.set_preference("browser.shell.checkDefaultBrowser", False)
firefox_profile.set_preference("browser.startup.homepage", "about:blank")
firefox_profile.set_preference("browser.startup.page", 0) # blank
firefox_profile.set_preference("browser.tabs.forceHide", True) # Disable tabs, We won't need that.
firefox_profile.set_preference("browser.urlbar.autoFill", False) # Disable autofill on URL bar.
firefox_profile.set_preference("browser.urlbar.autocomplete.enabled", False) # Disable autocomplete on URL bar.
firefox_profile.set_preference("browser.urlbar.showPopup", False) # Disable list of URLs when typing on URL bar.
firefox_profile.set_preference("browser.urlbar.showSearch", False) # Disable search bar.
firefox_profile.set_preference("extensions.checkCompatibility", False) # Addon update disabled
firefox_profile.set_preference("extensions.checkUpdateSecurity", False)
firefox_profile.set_preference("extensions.update.autoUpdateEnabled", False)
firefox_profile.set_preference("extensions.update.enabled", False)
firefox_profile.set_preference("general.startup.browser", False)
firefox_profile.set_preference("plugin.default_plugin_disabled", False)
firefox_profile.set_preference("permissions.default.image", 2) # Image load disabled again
#firefox_profile.set_preference('permissions.default.image', 2)
#firefox_profile.set_preference("network.cookie.cookieBehavior", 2) # disable cookie
firefox_profile.set_preference('permissions.default.stylesheet', 2) ## Disable CSS
firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so','false') ## Disable Flash
        # set headless / invisible mode
options = Options()
options.set_headless(headless=True)
self.browser = webdriver.Firefox(firefox_options=options, executable_path = 'geckodriver.exe', firefox_profile=firefox_profile)
#self.browser = webdriver.Firefox(executable_path = 'geckodriver.exe', firefox_profile=firefox_profile)
def get(self, url):
self.browser.get(url)
return self.browser.page_source
    @staticmethod
    def simple_get1(url):
        response = requests.get(url)
        return response.text
    @staticmethod
    def simple_get2(url):
        response = urllib2.urlopen(url)
        return response.read()
def close(self):
self.browser.quit()
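# Hedged usage sketch (function name and URL are placeholders): open a page in
# the headless browser, return its HTML, and always shut the driver down.
def fetch_page_example():
    scraper = openurl()
    try:
        return scraper.get("https://example.com")
    finally:
        scraper.close()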
| 60.783784
| 136
| 0.730547
|
64c3932a9e53bc959335844d981e93065a1d1a47
| 1,682
|
py
|
Python
|
databass/ops/orderby.py
|
MrZhihao/databass-columnar
|
6ecab5af257a20be88bf4e67ad3567c26ce0c964
|
[
"MIT"
] | null | null | null |
databass/ops/orderby.py
|
MrZhihao/databass-columnar
|
6ecab5af257a20be88bf4e67ad3567c26ce0c964
|
[
"MIT"
] | null | null | null |
databass/ops/orderby.py
|
MrZhihao/databass-columnar
|
6ecab5af257a20be88bf4e67ad3567c26ce0c964
|
[
"MIT"
] | null | null | null |
from ..baseops import *
from ..exprs import *
from ..db import Database
from ..schema import *
from ..tuples import *
from ..util import cache, OBTuple
from itertools import chain
from ..columns import ListColumns
import numpy as np
import pandas as pd
class OrderBy(UnaryOp):
"""
"""
def __init__(self, c, order_exprs, ascdescs):
"""
@c child operator
@order_exprs list of Expression objects
@ascdescs list of "asc" or "desc" strings, same length as @order_exprs
"""
super(OrderBy, self).__init__(c)
self.order_exprs = order_exprs
self.ascdescs = ascdescs
def get_col_up_needed(self):
seen = set(self.p.get_col_up_needed())
for e in self.order_exprs:
for attr in e.referenced_attrs:
seen.add((attr.real_tablename, attr.aname))
return list(seen)
def hand_in_result(self):
"""
OrderBy needs to accumulate all of its child
operator's outputs before sorting by the order expressions.
"""
order = [x == "asc" for x in self.ascdescs]
handin_res = self.c.hand_in_result()
if handin_res.is_terminate():
return ListColumns(self.schema, None)
sortby_keys = np.array([expr(handin_res).to_numpy() for expr in self.order_exprs]).T
sortby_keys_df = pd.DataFrame(sortby_keys)
col_idxes = sortby_keys_df.sort_values(by=list(range(len(order))), ascending=order, kind="mergesort").index.to_list()
return ListColumns(self.schema, [col.take(col_idxes) if col else None for col in handin_res])
def __str__(self):
args = ", ".join(["%s %s" % (e, ad)
for (e, ad) in zip(self.order_exprs, self.ascdescs)])
return "ORDERBY(%s)" % args
| 30.035714
| 121
| 0.675386
|
27b02a766910c8bbf8ef29cd2131f7e59f387888
| 6,530
|
py
|
Python
|
cli/bg_s3cli/scripts/utils/utils.py
|
bloomreach/bloomgateway
|
1455a5c03a50b73dcabadd43cf65189ac6052fcb
|
[
"Apache-2.0"
] | 25
|
2016-10-05T11:39:16.000Z
|
2021-01-04T01:55:27.000Z
|
cli/bg_s3cli/scripts/utils/utils.py
|
bloomreach/bloomgateway
|
1455a5c03a50b73dcabadd43cf65189ac6052fcb
|
[
"Apache-2.0"
] | null | null | null |
cli/bg_s3cli/scripts/utils/utils.py
|
bloomreach/bloomgateway
|
1455a5c03a50b73dcabadd43cf65189ac6052fcb
|
[
"Apache-2.0"
] | 4
|
2016-10-05T11:50:05.000Z
|
2021-01-04T01:55:28.000Z
|
#
# Copyright 2016 BloomReach, Inc.
# Copyright 2016 Ronak Kothari <ronak.kothari@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------
# imports relative to cli directory
# -------------------------------------------------------------------
import json
import jsonschema
import time
import s3_util
from bg_s3cli.conf import s3
ACCESS_PHASE = "access"
ERROR_PHASE = "error"
RATELIMITER_MODULE = "ratelimiter"
ACCESS_MODULE = "access"
FALLBACK_MODULE = "fallback"
def get_version_path(cluster_id):
"""
    Gives the s3 full path of the version file for a given cluster_id
"""
base_path = s3.get_cluster_info_base_path()
return base_path + "/" + cluster_id + "/version.json"
def get_version_info(cluster_id):
"""
Gives the version information of a given cluster_id as JSON.
    It is a dictionary with version information for each module and nginx.conf
"""
version_file_path_s3 = get_version_path(cluster_id)
version_file_contents = s3_util.get_item_to_string(version_file_path_s3)
version_data_dict = json.loads(version_file_contents)
return version_data_dict
def get_cluster_version_path(cluster_id):
"""
Gives s3 full path of cluster_version file of a given cluster_id
"""
base_path = s3.get_cluster_info_base_path()
return "%s/%s/cluster_version.json"%(base_path, cluster_id)
def get_cluster_version_info(cluster_id):
"""
Gives the cluster_version information as JSON
"""
cluster_version_file_path_s3 = get_cluster_version_path(cluster_id)
cluster_version_file_contents = s3_util.get_item_to_string(cluster_version_file_path_s3)
cluster_version_data_dict = json.loads(cluster_version_file_contents)
return cluster_version_data_dict
def get_cluster_info_path(cluster_id):
"""
    Gives the s3 path for cluster.json, which contains book-keeping information about the cluster.
"""
base_path = s3.get_cluster_info_base_path()
cluster_version_path = get_cluster_version_path(cluster_id)
cluster_version_file_contents = s3_util.get_item_to_string(cluster_version_path)
cluster_version_data_dict = json.loads(cluster_version_file_contents)
cluster_info_path = "%s/%s/%s/cluster.json"%(base_path, cluster_id, cluster_version_data_dict.get("cluster_version"))
return cluster_info_path
def get_cluster_info(cluster_id):
"""
Returns a dict for cluster.json
"""
cluster_info_file_path_s3 = get_cluster_info_path(cluster_id)
cluster_info_file_contents = s3_util.get_item_to_string(cluster_info_file_path_s3)
    cluster_info_data_dict = json.loads(cluster_info_file_contents)
return cluster_info_data_dict
def get_module_s3_path(cluster_id, phase, module, module_version):
"""
Gives s3 path for a given module's config/rule for a given version
"""
base_path = s3.get_cluster_info_base_path()
return "%s/%s/modules/%s/%s/%s/%s.rules"%(base_path, cluster_id, phase, module, module_version, module)
def get_module_info(cluster_id, module, phase):
"""
    Returns a dict of the module's rules for the current version.
"""
version_info = get_version_info(cluster_id)
module_version = version_info["modules"][phase][module]
s3_module_path = get_module_s3_path(cluster_id, phase, module, module_version)
contents = s3_util.get_item_to_string(s3_module_path)
return json.loads(contents)
def push_cluster_config(cluster_id, cluster_version, cluster_info):
"""
    Updates the book-keeping information for the cluster with id: cluster_id
"""
base_path = s3.get_cluster_info_base_path()
#push cluster info
s3_cluster_info_file_path_new = "%s/%s/%s/cluster.json"%(base_path, cluster_id, cluster_version)
s3_util.put_obj_to_json(s3_cluster_info_file_path_new, cluster_info)
#push cluster version info
s3_cluster_version_file_path_new = "%s/%s/cluster_version.json"%(base_path, cluster_id)
cluster_version_info = {}
cluster_version_info["cluster_version"] = cluster_version
s3_util.put_obj_to_json(s3_cluster_version_file_path_new, cluster_version_info)
def get_existing_rules(cluster_id, phase, module_name, version):
"""
Returns the dict of all the rules of a given module and given version for a cluster
"""
module_rules_file_path_s3 = get_module_s3_path(cluster_id, phase, module_name, version)
rule_file_contents = s3_util.get_item_to_string(module_rules_file_path_s3)
existing_rules = json.loads(rule_file_contents)
return existing_rules
def get_timestamped_version():
return time.strftime("%Y%m%d%.%H%M%S", time.gmtime(time.time()))
#converts a dictionary with unicode key value into byte strings key value recursively
#source - http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python
def byteify(input):
if isinstance(input, dict):
return {byteify(key): byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def verify_endpoint(endpoint):
"""
check if the given endpoint is in the form host:port
"""
if ':' not in endpoint:
print "endpoint not passed correctly %s"%endpoint
exit(1)
host_port = endpoint.split(':')
host = host_port[0]
port = host_port[1]
if ((host is None or host == "") or (port is None or port == "")):
print "endpoint [%s] not passed correctly. Host/port values mandatory "%endpoint
exit(1)
def verify_endpoints(endpoints):
"""
Check all the endpoints have the needed host:port format
"""
endpoints = endpoints.split(';')
for endpoint in endpoints:
verify_endpoint(endpoint)
def validate(json_obj, schema_def):
"""
    Validates a JSON object against the given JSON schema using jsonschema
"""
try:
jsonschema.validate(json_obj, schema_def)
except jsonschema.exceptions.ValidationError as e:
return (-2, e)
except Exception as e:
return (-1, e)
return (0, None)
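# Hedged usage sketch: how validate() is expected to be called. The schema and
# payload are invented; only the (code, error) return convention comes from the
# function above.
def _validate_example():
    schema_def = {"type": "object", "required": ["cluster_id"]}
    code, err = validate({"cluster_id": "c1"}, schema_def)
    return code == 0 and err is None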
| 36.277778
| 121
| 0.751914
|
513f31180eca6c17fd5cc94765ca6afa98a941ec
| 7,110
|
py
|
Python
|
scripts/configs.py
|
lakshaykc/lfm_quant
|
ac6f47c9a36f681920314423e2502f3654c5b592
|
[
"MIT"
] | 6
|
2021-03-21T19:05:55.000Z
|
2022-02-28T03:48:10.000Z
|
scripts/configs.py
|
lakshaykc/lfm_quant
|
ac6f47c9a36f681920314423e2502f3654c5b592
|
[
"MIT"
] | 1
|
2020-07-10T15:31:11.000Z
|
2020-07-21T08:47:04.000Z
|
scripts/configs.py
|
lakshaykc/lfm_quant
|
ac6f47c9a36f681920314423e2502f3654c5b592
|
[
"MIT"
] | 4
|
2021-07-08T14:53:35.000Z
|
2022-02-07T03:06:29.000Z
|
"""Implementation of the configs interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
class _LoadFromFile(argparse.Action):
"""Helper that supports the reading of config from a file"""
def __call__(self, parser, namespace, values, option_string=None):
with values as f:
parser.parse_known_args(f.read().split(), namespace)
_global_parser = argparse.ArgumentParser()
_global_parser.add_argument('--config', type=open,
action=_LoadFromFile,
help="File containing configuration")
class ConfigValues(object):
"""
Command line argument helper class.
"""
def __init__(self):
"""Global container and accessor for configs and their values."""
self.__dict__['__configs'] = {}
self.__dict__['__parsed'] = False
def _parse_configs(self):
result, _ = _global_parser.parse_known_args()
if '__configs' not in self.__dict__:
self.__dict__['__configs'] = {}
if '__parsed' not in self.__dict__:
self.__dict__['__parsed'] = False
for config_name, val in vars(result).items():
self.__dict__['__configs'][config_name] = val
self.__dict__['__parsed'] = True
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the config --name."""
if ('__parsed' not in self.__dict__) or (not self.__dict__['__parsed']):
self._parse_configs()
if name not in self.__dict__['__configs']:
raise AttributeError(name)
return self.__dict__['__configs'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the config --name."""
if ('__parsed' not in self.__dict__) or (not self.__dict__['__parsed']):
self._parse_configs()
self.__dict__['__configs'][name] = value
def _define_helper(config_name, default_value, docstring, configtype):
"""Registers 'config_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument("--" + config_name,
default=default_value,
help=docstring,
type=configtype)
def DEFINE_string(config_name, default_value, docstring):
"""Defines a config of type 'string'.
Args:
config_name: The name of the config as a string.
default_value: The default value the config should take as a string.
docstring: A helpful message explaining the use of the config.
"""
_define_helper(config_name, default_value, docstring, str)
def DEFINE_integer(config_name, default_value, docstring):
"""Defines a config of type 'int'.
Args:
config_name: The name of the config as a string.
default_value: The default value the config should take as an int.
docstring: A helpful message explaining the use of the config.
"""
_define_helper(config_name, default_value, docstring, int)
def DEFINE_boolean(config_name, default_value, docstring):
"""Defines a config of type 'boolean'.
Args:
config_name: The name of the config as a string.
default_value: The default value the config should take as a boolean.
docstring: A helpful message explaining the use of the config.
"""
# Register a custom function for 'bool' so --config=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + config_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
_global_parser.add_argument('--no' + config_name,
action='store_false',
dest=config_name)
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(config_name, default_value, docstring):
"""Defines a config of type 'float'.
Args:
config_name: The name of the config as a string.
default_value: The default value the config should take as a float.
docstring: A helpful message explaining the use of the config.
"""
_define_helper(config_name, default_value, docstring, float)
def DEFINE_list_integer(config_name, default_value, docstring):
"""
Defines a config of a list with `int` data type
:param config_name: The name of the config as a string
:param default_value: The default value the config should take as an int
:param docstring: A helpful message explaining the use of the config.
:return:
"""
_global_parser.add_argument('--' + config_name,
nargs='*',
help=docstring,
default=[int(default_value)] if default_value is not None else [None],
type=int)
def DEFINE_list_float(config_name, default_value, docstring):
"""
Defines a config of a list with `float` data type
:param config_name: The name of the config as a string
    :param default_value: The default value the config should take as a float
:param docstring: A helpful message explaining the use of the config.
:return:
"""
_global_parser.add_argument('--' + config_name,
nargs='*',
help=docstring,
default=[float(default_value)] if default_value is not None else [None],
type=float)
def DEFINE_list_string(config_name, default_value, docstring):
"""
Defines a config of a list with `str` data type
:param config_name: The name of the config as a string
    :param default_value: The default value the config should take as a str
:param docstring: A helpful message explaining the use of the config.
:return:
"""
_global_parser.add_argument('--' + config_name,
nargs='*',
help=docstring,
default=[default_value],
type=str)
def DEFINE_list_boolean(config_name, default_value, docstring):
"""
Defines a config of a list with `boolean` data type
:param config_name: The name of the config as a string
:param default_value: The default value the config should take as a boolean
:param docstring: A helpful message explaining the use of the config.
:return:
"""
def convert_to_list(s):
return s.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + config_name,
nargs='*',
help=docstring,
default=[default_value],
type=convert_to_list)
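# Hedged usage sketch: how a caller is expected to declare configs and read them
# back through ConfigValues. The config names and defaults are illustrative only.
def _configs_usage_example():
    DEFINE_string('data_dir', './data', 'Directory holding the input files.')
    DEFINE_integer('batch_size', 32, 'Number of samples per training batch.')
    configs = ConfigValues()
    return configs.data_dir, configs.batch_size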
| 37.619048
| 104
| 0.61097
|
9d5ccc15fda70a13beafd40271eb3a21777054b5
| 7,400
|
py
|
Python
|
mayan/apps/mailer/tests/test_workflow_actions.py
|
darrenflexxu/Mayan-EDMS
|
6707365bfacd137e625ddc1b990168012246fa07
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/mailer/tests/test_workflow_actions.py
|
darrenflexxu/Mayan-EDMS
|
6707365bfacd137e625ddc1b990168012246fa07
|
[
"Apache-2.0"
] | 5
|
2021-03-19T22:59:52.000Z
|
2022-03-12T00:13:16.000Z
|
mayan/apps/mailer/tests/test_workflow_actions.py
|
Sumit-Kumar-Jha/mayan
|
5b7ddeccf080b9e41cc1074c70e27dfe447be19f
|
[
"Apache-2.0"
] | 1
|
2020-07-29T21:03:27.000Z
|
2020-07-29T21:03:27.000Z
|
from __future__ import unicode_literals
import json
from django.core import mail
from mayan.apps.common.tests.base import GenericViewTestCase
from mayan.apps.documents.tests.mixins import DocumentTestMixin
from mayan.apps.document_states.literals import WORKFLOW_ACTION_ON_ENTRY
from mayan.apps.document_states.tests.base import ActionTestCase
from mayan.apps.document_states.tests.mixins import WorkflowTestMixin
from mayan.apps.metadata.tests.mixins import MetadataTypeTestMixin
from ..permissions import permission_user_mailer_use
from ..workflow_actions import EmailAction
from .literals import (
TEST_EMAIL_ADDRESS, TEST_EMAIL_BODY, TEST_EMAIL_FROM_ADDRESS,
TEST_EMAIL_SUBJECT
)
from .mixins import MailerTestMixin
class EmailActionTestCase(MailerTestMixin, WorkflowTestMixin, ActionTestCase):
def test_email_action_literal_text(self):
self._create_test_user_mailer()
action = EmailAction(
form_data={
'mailing_profile': self.test_user_mailer.pk,
'recipient': TEST_EMAIL_ADDRESS,
'subject': TEST_EMAIL_SUBJECT,
'body': TEST_EMAIL_BODY,
}
)
action.execute(context={'document': self.test_document})
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
def test_email_action_workflow_execute(self):
self._create_test_workflow()
self._create_test_workflow_state()
self._create_test_user_mailer()
self.test_workflow_state.actions.create(
action_data=json.dumps(
{
'mailing_profile': self.test_user_mailer.pk,
'recipient': TEST_EMAIL_ADDRESS,
'subject': TEST_EMAIL_SUBJECT,
'body': TEST_EMAIL_BODY,
}
),
action_path='mayan.apps.mailer.workflow_actions.EmailAction',
label='test email action', when=WORKFLOW_ACTION_ON_ENTRY,
)
self.test_workflow_state.initial = True
self.test_workflow_state.save()
self.test_workflow.document_types.add(self.test_document_type)
self.upload_document()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
class EmailActionTemplateTestCase(MetadataTypeTestMixin, MailerTestMixin, WorkflowTestMixin, ActionTestCase):
def test_email_action_recipient_template(self):
self._create_test_metadata_type()
self.test_document_type.metadata.create(metadata_type=self.test_metadata_type)
self.test_document.metadata.create(metadata_type=self.test_metadata_type, value=TEST_EMAIL_ADDRESS)
self._create_test_user_mailer()
action = EmailAction(
form_data={
'mailing_profile': self.test_user_mailer.pk,
'recipient': '{{{{ document.metadata_value_of.{} }}}}'.format(self.test_metadata_type.name),
'subject': TEST_EMAIL_SUBJECT,
'body': '',
}
)
action.execute(context={'document': self.test_document})
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
self.assertEqual(
mail.outbox[0].to, [self.test_document.metadata.first().value]
)
def test_email_action_subject_template(self):
self._create_test_metadata_type()
self.test_document_type.metadata.create(metadata_type=self.test_metadata_type)
self.test_document.metadata.create(metadata_type=self.test_metadata_type, value=TEST_EMAIL_SUBJECT)
self._create_test_user_mailer()
action = EmailAction(
form_data={
'mailing_profile': self.test_user_mailer.pk,
'recipient': TEST_EMAIL_ADDRESS,
'subject': '{{{{ document.metadata_value_of.{} }}}}'.format(self.test_metadata_type.name),
'body': '',
}
)
action.execute(context={'document': self.test_document})
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
self.assertEqual(
mail.outbox[0].subject, self.test_document.metadata.first().value
)
def test_email_action_body_template(self):
self._create_test_metadata_type()
self.test_document_type.metadata.create(metadata_type=self.test_metadata_type)
self.test_document.metadata.create(metadata_type=self.test_metadata_type, value=TEST_EMAIL_BODY)
self._create_test_user_mailer()
action = EmailAction(
form_data={
'mailing_profile': self.test_user_mailer.pk,
'recipient': TEST_EMAIL_ADDRESS,
'subject': TEST_EMAIL_SUBJECT,
'body': '{{{{ document.metadata_value_of.{} }}}}'.format(self.test_metadata_type.name),
}
)
action.execute(context={'document': self.test_document})
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
self.assertEqual(mail.outbox[0].body, TEST_EMAIL_BODY)
class EmailActionViewTestCase(DocumentTestMixin, MailerTestMixin, WorkflowTestMixin, GenericViewTestCase):
auto_upload_document = False
def test_email_action_create_get_view(self):
self._create_test_workflow()
self._create_test_workflow_state()
self._create_test_user_mailer()
response = self.get(
viewname='document_states:workflow_template_state_action_create',
kwargs={
'pk': self.test_workflow_state.pk,
'class_path': 'mayan.apps.mailer.workflow_actions.EmailAction',
}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.test_workflow_state.actions.count(), 0)
def _request_email_action_create_post_view(self):
return self.post(
viewname='document_states:workflow_template_state_action_create',
kwargs={
'pk': self.test_workflow_state.pk,
'class_path': 'mayan.apps.mailer.workflow_actions.EmailAction',
}, data={
'when': WORKFLOW_ACTION_ON_ENTRY,
'label': 'test email action',
'mailing_profile': self.test_user_mailer.pk,
'recipient': TEST_EMAIL_ADDRESS,
'subject': TEST_EMAIL_SUBJECT,
'body': TEST_EMAIL_BODY,
}
)
def test_email_action_create_post_view(self):
self._create_test_workflow()
self._create_test_workflow_state()
self._create_test_user_mailer()
self.grant_access(
obj=self.test_user_mailer, permission=permission_user_mailer_use
)
response = self._request_email_action_create_post_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(self.test_workflow_state.actions.count(), 1)
| 39.784946
| 109
| 0.670811
|
babf3c4f1d4247ea2a4d0888f6d5fef9872e826e
| 1,719
|
py
|
Python
|
tests/addresslib/quote_test.py
|
skshetry/flanker
|
63d1cdf927777f49f97e8d7f01e105a3b0d25cd2
|
[
"Apache-2.0"
] | 929
|
2015-01-01T11:14:21.000Z
|
2022-03-28T23:47:40.000Z
|
tests/addresslib/quote_test.py
|
skshetry/flanker
|
63d1cdf927777f49f97e8d7f01e105a3b0d25cd2
|
[
"Apache-2.0"
] | 141
|
2015-01-10T19:02:03.000Z
|
2021-07-26T18:04:14.000Z
|
tests/addresslib/quote_test.py
|
skshetry/flanker
|
63d1cdf927777f49f97e8d7f01e105a3b0d25cd2
|
[
"Apache-2.0"
] | 179
|
2015-01-01T18:42:46.000Z
|
2022-02-16T21:57:14.000Z
|
# coding=utf-8
from nose.tools import eq_
from flanker.addresslib.quote import smart_quote, smart_unquote
def test_quote():
eq_('"foo, bar"', smart_quote('foo, bar'))
eq_('"foo; bar"', smart_quote('foo; bar'))
eq_('"foo< bar"', smart_quote('foo< bar'))
eq_('"foo> bar"', smart_quote('foo> bar'))
eq_('"foo\\" bar"', smart_quote('foo" bar'))
eq_('"foo: bar"', smart_quote('foo: bar'))
def test_quote__periods():
eq_('foo. bar', smart_quote('foo. bar'))
def test_quote__spaces():
eq_('foo bar', smart_quote('foo bar'))
eq_('" foo bar"', smart_quote(' foo bar'))
eq_('"foo bar "', smart_quote('foo bar '))
eq_('" foo bar "', smart_quote(' foo bar '))
eq_('foo\tbar', smart_quote('foo\tbar'))
eq_('"\tfoo\tbar"', smart_quote('\tfoo\tbar'))
eq_('"foo\tbar\t"', smart_quote('foo\tbar\t'))
eq_('"\tfoo\tbar\t"', smart_quote('\tfoo\tbar\t'))
def test_quote__escaping():
eq_('"f\\\\o\\"o \\"bar\\""', smart_quote('f\\o"o "bar"'))
eq_('"\\"foo\\""', smart_quote('"foo"'))
eq_('"\\"foo\\"bar\\""', smart_quote('"foo"bar"'))
def test_quote__nothing_to_quote():
eq_('', smart_quote(''))
eq_('foo bar', smart_quote('foo bar'))
eq_("!#$%&'*+-/=?^_`{|}~",
smart_quote("!#$%&'*+-/=?^_`{|}~"))
def test_unquote():
eq_('foo bar "(bazz)" blah oops',
smart_unquote('foo "bar \\"(bazz)\\" blah" oops'))
eq_('foo; bar. \\bazz\\', smart_unquote('"foo;" "bar." "\\\\bazz\\\\"'))
eq_('"foo"bar"', smart_unquote('"\\"foo\\"bar\\"'))
def test_unquote__nothing_to_unquote():
eq_('foo\\.;\tbar', smart_unquote('foo\\.;\tbar'))
def test_unquote__unicode():
eq_(u'Превед Медвед', smart_unquote(u'Превед Медвед'))
| 30.696429
| 78
| 0.578243
|
52ff97400c9beafca602520900a49837c1d0c1db
| 4,929
|
py
|
Python
|
test/with_dummyserver/test_poolmanager.py
|
1T/urllib3
|
7d50b9fb19df3c115a9fad4f54d1f3d971fb2cc7
|
[
"MIT"
] | 3
|
2015-01-17T02:29:11.000Z
|
2018-03-26T01:46:52.000Z
|
test/with_dummyserver/test_poolmanager.py
|
1T/urllib3
|
7d50b9fb19df3c115a9fad4f54d1f3d971fb2cc7
|
[
"MIT"
] | null | null | null |
test/with_dummyserver/test_poolmanager.py
|
1T/urllib3
|
7d50b9fb19df3c115a9fad4f54d1f3d971fb2cc7
|
[
"MIT"
] | null | null | null |
import unittest
import json
from dummyserver.testcase import (HTTPDummyServerTestCase,
IPv6HTTPDummyServerTestCase)
from urllib3.poolmanager import PoolManager
from urllib3.connectionpool import port_by_scheme
from urllib3.exceptions import MaxRetryError, SSLError
class TestPoolManager(HTTPDummyServerTestCase):
def setUp(self):
self.base_url = 'http://%s:%d' % (self.host, self.port)
self.base_url_alt = 'http://%s:%d' % (self.host_alt, self.port)
def test_redirect(self):
http = PoolManager()
r = http.request('GET', '%s/redirect' % self.base_url,
fields={'target': '%s/' % self.base_url},
redirect=False)
self.assertEqual(r.status, 303)
r = http.request('GET', '%s/redirect' % self.base_url,
fields={'target': '%s/' % self.base_url})
self.assertEqual(r.status, 200)
self.assertEqual(r.data, b'Dummy server!')
def test_redirect_twice(self):
http = PoolManager()
r = http.request('GET', '%s/redirect' % self.base_url,
fields={'target': '%s/redirect' % self.base_url},
redirect=False)
self.assertEqual(r.status, 303)
r = http.request('GET', '%s/redirect' % self.base_url,
fields={'target': '%s/redirect?target=%s/' % (self.base_url, self.base_url)})
self.assertEqual(r.status, 200)
self.assertEqual(r.data, b'Dummy server!')
def test_redirect_to_relative_url(self):
http = PoolManager()
r = http.request('GET', '%s/redirect' % self.base_url,
fields = {'target': '/redirect'},
redirect = False)
self.assertEqual(r.status, 303)
r = http.request('GET', '%s/redirect' % self.base_url,
fields = {'target': '/redirect'})
self.assertEqual(r.status, 200)
self.assertEqual(r.data, b'Dummy server!')
def test_cross_host_redirect(self):
http = PoolManager()
cross_host_location = '%s/echo?a=b' % self.base_url_alt
try:
http.request('GET', '%s/redirect' % self.base_url,
fields={'target': cross_host_location},
timeout=0.01, retries=0)
self.fail("Request succeeded instead of raising an exception like it should.")
except MaxRetryError:
pass
r = http.request('GET', '%s/redirect' % self.base_url,
fields={'target': '%s/echo?a=b' % self.base_url_alt},
timeout=0.01, retries=1)
self.assertEqual(r._pool.host, self.host_alt)
def test_missing_port(self):
# Can a URL that lacks an explicit port like ':80' succeed, or
# will all such URLs fail with an error?
http = PoolManager()
# By globally adjusting `port_by_scheme` we pretend for a moment
# that HTTP's default port is not 80, but is the port at which
# our test server happens to be listening.
port_by_scheme['http'] = self.port
try:
r = http.request('GET', 'http://%s/' % self.host, retries=0)
finally:
port_by_scheme['http'] = 80
self.assertEqual(r.status, 200)
self.assertEqual(r.data, b'Dummy server!')
def test_headers(self):
http = PoolManager(headers={'Foo': 'bar'})
r = http.request_encode_url('GET', '%s/headers' % self.base_url)
returned_headers = json.loads(r.data.decode())
self.assertEqual(returned_headers.get('Foo'), 'bar')
r = http.request_encode_body('POST', '%s/headers' % self.base_url)
returned_headers = json.loads(r.data.decode())
self.assertEqual(returned_headers.get('Foo'), 'bar')
r = http.request_encode_url('GET', '%s/headers' % self.base_url, headers={'Baz': 'quux'})
returned_headers = json.loads(r.data.decode())
self.assertEqual(returned_headers.get('Foo'), None)
self.assertEqual(returned_headers.get('Baz'), 'quux')
r = http.request_encode_body('GET', '%s/headers' % self.base_url, headers={'Baz': 'quux'})
returned_headers = json.loads(r.data.decode())
self.assertEqual(returned_headers.get('Foo'), None)
self.assertEqual(returned_headers.get('Baz'), 'quux')
def test_http_with_ssl_keywords(self):
http = PoolManager(ca_certs='REQUIRED')
r = http.request('GET', 'http://%s:%s/' % (self.host, self.port))
self.assertEqual(r.status, 200)
class TestIPv6PoolManager(IPv6HTTPDummyServerTestCase):
def setUp(self):
self.base_url = 'http://[%s]:%d' % (self.host, self.port)
def test_ipv6(self):
http = PoolManager()
http.request('GET', self.base_url)
if __name__ == '__main__':
unittest.main()
| 35.978102
| 102
| 0.590586
|
26e9e2e0c834b87a52e8c638ed00dc05b9a3f242
| 15,184
|
py
|
Python
|
gbe_browser/processannot.py
|
whyrg/GlobalBiobankEngine
|
514f16eaaae16f0459b40cd1080c9243f007ec91
|
[
"MIT"
] | null | null | null |
gbe_browser/processannot.py
|
whyrg/GlobalBiobankEngine
|
514f16eaaae16f0459b40cd1080c9243f007ec91
|
[
"MIT"
] | null | null | null |
gbe_browser/processannot.py
|
whyrg/GlobalBiobankEngine
|
514f16eaaae16f0459b40cd1080c9243f007ec91
|
[
"MIT"
] | null | null | null |
from __future__ import division
import lookups
import re
import scidbbiobank
import config
import utils
import logging
import numpy
import sys
import os
import scipy.stats
import itertools
ns = sys.argv[1]
def get_db(name_space):
DB = scidbbiobank.connect(scidb_url=os.getenv('SCIDB_URL',None), scidb_auth=('scidbadmin', 'Paradigm4'), namespace=name_space)
DB.set_limit(15000)
return DB
db = get_db(ns)
RSID_FORMAT = '{chrom}-{pos}-{ref}-{alt}'
# 1:1-1000
REGION_RE1 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)-(\d+)$')
REGION_RE2 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)$')
REGION_RE3 = re.compile(r'^(\d+|X|Y|M|MT)$')
REGION_RE4 = re.compile(r'^(\d+|X|Y|M|MT)\s*[-:]\s*(\d+)-([ATCG]+)-([ATCG]+)$')
TRANSCRIPT_INFO_KEYS = ('transcript_id', 'strand', 'chrom', 'start', 'stop')
EXON_INFO_KEYS = ('feature_type', 'chrom', 'start', 'stop')
def numpy2dict0(ar):
"""Convert SciDB NumPy record result to Python dictionary and populate
nullable attributes with values (discards null codes).
"""
if not len(ar):
return None
el = ar[0]
return dict(
(de[0],
el[de[0]]['val'] if isinstance(de[1], list) else el[de[0]])
for de in ar.dtype.descr if de[0] != 'notused')
def numpy2dict(ar):
"""Convert SciDB NumPy array result to a list of Python dictionary and
populate nullable attributes with values (discards null codes).
"""
return [
dict(
(de[0],
el[de[0]]['val'] if isinstance(de[1], list) else el[de[0]])
for de in ar.dtype.descr if de[0] != 'notused'
)
for el in ar]
def parse_vep_annotations(csq, gene_id=None, transcript_id=None):
return [ann for ann in (dict(zip(config.VARIANT_CSQ, cs.split('|')))
for cs in csq.split(','))
if ('Feature' in ann and
ann['Feature'].startswith('ENST') and
(gene_id is None or ann['Gene'] == gene_id) and
(transcript_id is None or ann['Feature'] == transcript_id))]
def format_variants(variants, add_ann=False, gene_id=None, transcript_id=None):
for variant in variants:
variant['rsid'] = ('rs{}'.format(variant['rsid'])
if variant['rsid'] else '.')
variant['variant_id'] = RSID_FORMAT.format(
chrom=variant['chrom'],
pos=variant['pos'],
ref=variant['ref'],
alt=variant['alt'])
vep_annotations = parse_vep_annotations(
variant['csq'], gene_id, transcript_id)
if add_ann:
variant['vep_annotations'] = vep_annotations
variant['genes'] = list(set(ann['Gene'] for ann in vep_annotations))
variant['gene_name'] = ','.join(variant['genes'][:3])
variant['gene_symbol'] = ','.join(
itertools.islice(set(ann['SYMBOL'] for ann in vep_annotations), 3))
variant['transcripts'] = list(set(
ann['Feature'] for ann in vep_annotations))
utils.add_consequence_to_variant(variant, vep_annotations)
return variants
def cast_pos_info(gene):
for key in ('chrom', 'start', 'stop'):
if key in gene:
gene[key] = int(gene[key])
return gene
def add_xpos(gene):
if gene and all(k in gene.keys() for k in ('chrom', 'start', 'end')):
gene['xstart'] = gene['chrom'] * config.XOFF + gene['start']
gene['xstop'] = gene['chrom'] * config.XOFF + gene['end']
return gene
def exists(db, array_name, attr_name, attr_val):
"""
Search bar
MongoDB:
db.genes.find({'gene_id': 'ENSG00000107404'}, fields={'_id': False})
SciDB:
aggregate(
filter(gene_index, gene_id = 'ENSG00000198734'),
count(*));
"""
return bool(
db.iquery(
config.EXISTS_QUERY.format(
array_name=array_name,
attr_name=attr_name,
attr_val=attr_val),
schema=config.EXISTS_SCHEMA,
fetch=True,
atts_only=True)[0]['count']['val'])
# -- -
# -- - ICD - --
# -- -
def get_icd_name_map(db):
"""
e.g.,
UI:
https://biobankengine.stanford.edu/coding/RH117
MongoDB:
db.icd_info.find({'icd': 'RH117'}, fields={'_id': False})
SciDB:
project(icd_info, icd, Name);
"""
return dict((i['icd']['val'], ' '.join(i['Name']['val'].split()))
for i in db.iquery(config.ICD_INFO_MAP_QUERY,
schema=config.ICD_INFO_MAP_SCHEMA,
fetch=True,
atts_only=True))
def exists_icd(db, icd):
"""
Search bar
MongoDB:
db.icd_info.find({'icd': 'RH141'}, fields={'_id': False})
SciDB:
aggregate(
filter(icd_info, icd = 'RH141'),
count(*));
SciDBnew:
res = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
resphe = [res['description'] == icd]
return bool(resphe.empty)
"""
res = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
resphe = res[res['description'] == icd]
return not bool(resphe.empty)
def get_phe_title(db, phename):
"""
Search bar
SciDBnew:
res = str(list(phef[phef['description'] == "asthma_diagnosed_by_doctor"]['title'])[0])
"""
phef = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
if phef.empty:
return None
else:
res = str(list(phef[phef['description'] == phename]['title'])[0])
return res
def get_phe_name(db, icd):
"""
Search bar
SciDBnew:
icdres['shortname'] = icdres.apply(lambda row: str(df[df['title'] == row['title']]['notes'].squeeze()).split(';')[1].split('=')[1], axis = 1)
logging.info('shortname')
icdres['Case'] = icdres.apply(lambda row: str(df[df['title'] == row['title']]['notes'].squeeze()).split(';')[0].split('=')[1], axis = 1)
icdres['gene_name'], icdres['gene_symbol'], icdres['HGVSp'], icdres['HGVSc'] = zip(*icdres['annotations'].map(utils.return_gene_vep))
#icdres.apply(lambda row: utils.return_gene_vep(row['annotations']), axis = 1)
logging.info(icdres['gene_symbol'])
logging.info('Case')
icdres['log10pvalue'] = icdres.apply(lambda row: -numpy.log10(row['pvalue']), axis = 1)
logging.info('l10pval')
icdres['icd'] = icdres['title']
logging.info('icd')
if icdres.loc[icdres['odds_ratio'].isna()].shape[0] < icdres.loc[icdres['beta'].isna()].shape[0]:
icdres['or_val'] = icdres['odds_ratio']
res = str(list(phef[phef['description'] == "asthma_diagnosed_by_doctor"]['title'])[0])
"""
phef = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
if phef.empty:
return None
else:
res = str(phef[phef['title'] == icd]['notes'].squeeze()).split(';')[1].split('=')[1]
return res
def get_phe_case(db, icd):
"""
Search bar
SciDBnew:
icdres['shortname'] = icdres.apply(lambda row: str(df[df['title'] == row['title']]['notes'].squeeze()).split(';')[1].split('=')[1], axis = 1)
logging.info('shortname')
icdres['Case'] = icdres.apply(lambda row: str(df[df['title'] == row['title']]['notes'].squeeze()).split(';')[0].split('=')[1], axis = 1)
icdres['gene_name'], icdres['gene_symbol'], icdres['HGVSp'], icdres['HGVSc'] = zip(*icdres['annotations'].map(utils.return_gene_vep))
#icdres.apply(lambda row: utils.return_gene_vep(row['annotations']), axis = 1)
logging.info(icdres['gene_symbol'])
logging.info('Case')
icdres['log10pvalue'] = icdres.apply(lambda row: -numpy.log10(row['pvalue']), axis = 1)
logging.info('l10pval')
icdres['icd'] = icdres['title']
logging.info('icd')
if icdres.loc[icdres['odds_ratio'].isna()].shape[0] < icdres.loc[icdres['beta'].isna()].shape[0]:
icdres['or_val'] = icdres['odds_ratio']
res = str(list(phef[phef['description'] == "asthma_diagnosed_by_doctor"]['title'])[0])
"""
phef = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
if phef.empty:
return None
else:
res = str(phef[phef['title'] == icd]['notes'].squeeze()).split(';')[0].split('=')[1]
return res
def get_icd_by_chrom_pos(db, chrom, start, stop=None, icd=None):
"""
e.g.,
UI:
https://biobankengine.stanford.edu/variant/1-39381448
MongoDB:
db.icd.find({'xpos': '1039381448'}, fields={'_id': False})
SciDB:
equi_join(
between(icd, null, 1, 39381448, null,
null, 1, 39381448, null),
icd_info,
'left_names=icd_idx',
'right_names=icd_idx',
'keep_dimensions=1',
'algorithm=hash_replicate_right');
SciDBnew:
db.get_association_data(association_set=assocset, chromosome=22, position = 32334104)
"""
if not stop:
stop = start
# if not icd:
# icd_info_filter = config.ICD_INFO_ARRAY
# else:
# icd_info_filter = 'filter({icd_info_array}, {cond})'.format(
# icd_info_array=config.ICD_INFO_ARRAY,
# cond=' or '.join("icd = '{}'".format(i) for i in icd))
# return numpy2dict(
# db.iquery(
# config.ICD_CHROM_POS_LOOKUP_QUERY.format(
# icd_info_filter=icd_info_filter,
# chrom=chrom,
# start=start,
# stop=stop),
# schema=config.ICD_CHROM_POS_LOOKUP_SCHEMA,
# fetch=True,
# atts_only=True))
def get_icd_affyid(db, affyid):
"""
e.g.,
UI:
https://biobankengine.stanford.edu/intensity/723307
MongoDB:
db.icd.find_one({'affyid': '723307'}, fields={'_id': False})
SciDB:
equi_join(
icd_affyid,
filter(affyid_index, affyid = '723307'),
'left_names=affyid_idx',
'right_names=affyid_idx',
'algorithm=hash_replicate_right');
"""
return numpy2dict0(
db.iquery(
config.ICD_AFFYID_LOOKUP_QUERY.format(affyid=affyid),
schema=config.ICD_AFFYID_LOOKUP_SCHEMA,
fetch=True,
atts_only=True))
#def get_icd_variant_by_icd_id_pvalue(db, icd_id, field_identifier, pvalue=0.001):
assocset = str(db.list_association_sets()['name'][0])
chroms = list(range(1, 23))
chroms.append('X')
chroms.append('Y')
for chrom in chroms:
fnout = ns + '.' + str(chrom) + '.txt'
"""
e.g.,OB
UI:
https://biobankengine.stanford.edu/coding/RH117
MongoDB:
db.icd.find({'icd': 'RH117', 'stats.pvalue': {'$lt': 0.01}},
fields={'_id': false})
db.icd_info.find({'icd': 'RH117'}, fields={'_id': False})
db.variants.find({'xpos': '1039381448'}, fields={'_id': False})
SciDB:
equi_join(
project(variant,
rsid,
ref,
alt,
filter,
exac_nfe,
csq),
cross_join(
project(
between(icd, null, null, null, 1, null,
null, null, null, null, null),
or_val,
pvalue,
log10pvalue),
filter(icd_info, icd = 'RH117'),
icd.icd_idx,
icd_info.icd_idx) as icd_join,
'left_names=chrom,pos',
'right_names=chrom,pos',
'keep_dimensions=1',
'algorithm=merge_right_first');
SciDBnew:
bb.get_association_data(association_set=assocset,
chromosome=22, start=32300000, end=32400000, pvalue_max=1, field_id = int(df[df['title'] == 'HC382']['field_id']))
"""
df = db.get_variants(association_set=assocset, chromosome=chrom, variant_fields = ('chrom','pos','ref','alt', 'gnomad_filter', 'consequence', 'all_filters', 'annotations', 'maf', 'ld', 'rsid') )
df['variant_id'] = df.apply(lambda row: RSID_FORMAT.format(
chrom=row['chrom'],
pos=row['pos'],
ref=row['ref'],
alt=row['alt']), axis = 1)
df.loc[~df.consequence.isin(utils.csq_order),'consequence'] = 'intergenic'
df['major_consequence'] = df['consequence']
df['category'] = df.apply(lambda row: utils.add_category_to_variant(row['consequence']), axis = 1)
# icdres['filter'] = icdres.apply(lambda row: 'PASS' if row['all_filters'] == 0 and row['se'] <= .5 else 'SE' if row['se'] > .5 else 'FAIL', axis = 1)
# icdres['pvalue'] = icdres.apply(lambda row: row['pvalue'] if row['pvalue'] != 0 else 1e-300, axis = 1)
# icdres['Name'] = icdres.apply(lambda row: df[df['title'] == row['title']]['description'].squeeze(), axis = 1)
# icdres['shortname'] = icdres.apply(lambda row: str(df[df['title'] == row['title']]['notes'].squeeze()).split(';')[1].split('=')[1], axis = 1)
# icdres['Name'] = icdres['shortname']
# icdres['Case'] = icdres.apply(lambda row: str(df[df['title'] == row['title']]['notes'].squeeze()).split(';')[0].split('=')[1], axis = 1)
df['gene_name'], df['gene_symbol'], df['HGVSp'], df['HGVSc'] = zip(*df['annotations'].map(utils.return_gene_vep))
# icdres = icdres.drop(columns=['annotations'])
#icdres.apply(lambda row: utils.return_gene_vep(row['annotations']), axis = 1)
#icdres['log10pvalue'] = icdres.apply(lambda row: -numpy.log10(row['pvalue']), axis = 1)
#icdres['icd'] = icdres['title']
#if icdres.loc[icdres['odds_ratio'].isna()].shape[0] < icdres.loc[icdres['beta'].isna()].shape[0]:
# icdres['or_val'] = icdres['odds_ratio']
# icdres['lor_val'] = icdres.apply(lambda row: numpy.log(row['odds_ratio']), axis = 1)
#else:
# icdres['or_val'] = icdres['beta']
# icdres['lor_val'] = icdres['beta']
df['ukbb_freq'] = df.apply(lambda row: min(float(row['maf']), 1 - float(row['maf'])) if row['maf'] is not None else None, axis = 1)
df['gnomad_af'] = df.apply(lambda row: min(float(row['gnomad_filter']),1-float(row['gnomad_filter'])) if row['gnomad_filter'] is not None else None, axis = 1)
# df['enrichlogor'] = df.apply(lambda row: numpy.log((float(row['ukbb_freq'])*(1-row['gnomad_af']))/((.00000000001 + row['gnomad_af'])*(0.00000000001 + 1-float(row['ukbb_freq'])))) if row['gnomad_af'] is not None else 10, axis = 1)
# df['enrichp'] = df.apply(lambda row: scipy.stats.norm.sf(row['enrichlogor']/numpy.sqrt(1/(float(row['ukbb_freq'])*200000*2 + .5) + 1/((1-float(row['ukbb_freq']))*200000*2 + .5) + 1/(float(row['gnomad_af'])*30000*2 + .5) + 1/((1 - float(row['gnomad_af']))*30000*2 + .5))) if row['gnomad_filter'] is not None else 0, axis = 1)
# dfn = df[['chrom','pos', 'ref','alt','variant_id','major_consequence','category','gene_name','gene_symbol','HGVSp','HGVSc','ukbb_freq','gnomad_af','enrichlogor', 'enrichp']]
dfn = df[['chrom','pos', 'ref','alt','variant_id','major_consequence','category','gene_name','gene_symbol','HGVSp','HGVSc','ukbb_freq','gnomad_af']]
dfn.to_csv(fnout, index=None, sep='\t', mode='w')
| 38.055138
| 329
| 0.583443
|
a496a7731b51cc9d177f64b186213c75b42b7d39
| 9,434
|
py
|
Python
|
gui/qt/seed_dialog.py
|
VitaeTeam/ViLight
|
caedc04eb3717bd19774ef3b84f6e138b780f650
|
[
"MIT"
] | 1
|
2021-06-08T20:06:34.000Z
|
2021-06-08T20:06:34.000Z
|
gui/qt/seed_dialog.py
|
VitaeTeam/ViLight
|
caedc04eb3717bd19774ef3b84f6e138b780f650
|
[
"MIT"
] | 6
|
2019-12-01T23:58:20.000Z
|
2020-07-24T17:30:10.000Z
|
gui/qt/seed_dialog.py
|
VitaeTeam/ViLight
|
caedc04eb3717bd19774ef3b84f6e138b780f650
|
[
"MIT"
] | 3
|
2020-01-09T08:17:25.000Z
|
2020-09-15T01:04:41.000Z
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash.i18n import _
from .util import *
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
def seed_warning_msg(seed):
return ''.join([
"<p>",
_("Please save these %d words on paper (order is important). "),
_("This seed will allow you to recover your wallet in case "
"of computer failure."),
"</p>",
"<b>" + _("WARNING") + ":</b>",
"<ul>",
"<li>" + _("Never disclose your seed.") + "</li>",
"<li>" + _("Never type it on a website.") + "</li>",
"<li>" + _("Do not store it electronically.") + "</li>",
"</ul>"
]) % len(seed.split())
class SeedLayout(QVBoxLayout):
#options
is_bip39 = False
is_ext = False
is_bip39_445 = False
def seed_options(self):
dialog = QDialog()
vbox = QVBoxLayout(dialog)
if 'ext' in self.options:
cb_ext = QCheckBox(_('Extend this seed with custom words') + " " + _("(aka 'passphrase')"))
cb_ext.setChecked(self.is_ext)
vbox.addWidget(cb_ext)
if 'bip39' in self.options:
def f(b):
self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
self.is_bip39 = b
self.on_edit()
if b:
msg = ' '.join([
'<b>' + _('About BIP39') + ':</b> ',
_('BIP39 seeds can be imported into Electron Cash so that users can access funds from other wallets.'),
_('However, we do not generate BIP39 seeds because our seed format is better at preserving future compatibility.'),
_('BIP39 seeds do not include a version number, which makes compatibility with future software more difficult.')
])
else:
msg = ''
self.seed_warning.setText(msg)
cb_bip39 = QCheckBox(_('BIP39 seed'))
cb_bip39.toggled.connect(f)
cb_bip39.setChecked(self.is_bip39)
vbox.addWidget(cb_bip39)
# Note: I grep'd the sources. As of May 2019, this code path cannot
# be reached. I'm leaving this here in case it serves some purpose
# still -- but I cannot see any place in the code where this branch
# would be triggered. The below warning message is needlessly
# FUD-ey. It should be altered if this code path is ever reinstated.
# -Calin
if 'bip39_445' in self.options:
def f(b):
self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
self.on_edit()
self.is_bip39 = b
if b:
msg = ' '.join([
'<b>' + _('Warning') + ': BIP39 seeds are dangerous!' + '</b><br/><br/>',
_('BIP39 seeds can be imported in Electron Cash so that users can access funds locked in other wallets.'),
_('However, BIP39 seeds do not include a version number, which compromises compatibility with future wallet software.'),
'<br/><br/>',
_('We do not guarantee that BIP39 imports will always be supported in Electron Cash.'),
_('In addition, Electron Cash does not verify the checksum of BIP39 seeds; make sure you type your seed correctly.'),
])
else:
msg = ''
self.seed_warning.setText(msg)
cb_bip39_445 = QCheckBox(_('Use Coin Type 145 with bip39'))
cb_bip39_445.toggled.connect(f)
cb_bip39_445.setChecked(self.is_bip39_445)
vbox.addWidget(cb_bip39_445)
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
self.is_ext = cb_ext.isChecked() if 'ext' in self.options else False
self.is_bip39 = cb_bip39.isChecked() if 'bip39' in self.options else False
self.is_bip39_445 = cb_bip39_445.isChecked() if 'bip39_445' in self.options else False
    def __init__(self, seed=None, title=None, icon=True, msg=None, options=None, is_seed=None, passphrase=None, derivation=None, parent=None, editable=True):
QVBoxLayout.__init__(self)
self.parent = parent
self.options = options
if title:
self.addWidget(WWLabel(title))
self.seed_e = ButtonsTextEdit()
self.seed_e.setReadOnly(not editable)
if seed:
self.seed_e.setText(seed)
else:
self.seed_e.setTabChangesFocus(True)
self.is_seed = is_seed
self.saved_is_seed = self.is_seed
self.seed_e.textChanged.connect(self.on_edit)
self.seed_e.setMaximumHeight(75)
hbox = QHBoxLayout()
if icon:
logo = QLabel()
logo.setPixmap(QIcon(":icons/seed.png").pixmap(64))
logo.setMaximumWidth(60)
hbox.addWidget(logo)
hbox.addWidget(self.seed_e)
self.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addStretch(1)
self.seed_type_label = QLabel('')
hbox.addWidget(self.seed_type_label)
if options:
opt_button = EnterButton(_('Options'), self.seed_options)
hbox.addWidget(opt_button)
self.addLayout(hbox)
grid_maybe = None
grid_row = 0
if passphrase:
grid_maybe = QGridLayout()
passphrase_e = QLineEdit()
passphrase_e.setText(passphrase)
passphrase_e.setReadOnly(True)
grid_maybe.addWidget(QLabel(_("Your seed extension is") + ':'), grid_row, 0)
grid_maybe.addWidget(passphrase_e, grid_row, 1)
grid_row += 1
if derivation:
grid_maybe = grid_maybe or QGridLayout()
der_e = QLineEdit()
der_e.setText(str(derivation))
der_e.setReadOnly(True)
grid_maybe.addWidget(QLabel(_("Wallet derivation path") + ':'), grid_row, 0)
grid_maybe.addWidget(der_e, grid_row, 1)
grid_row += 1
if grid_maybe:
self.addLayout(grid_maybe)
self.addStretch(1)
self.seed_warning = WWLabel('')
if msg:
self.seed_warning.setText(seed_warning_msg(seed))
self.addWidget(self.seed_warning)
def get_seed(self):
text = self.seed_e.text()
return ' '.join(text.split())
def on_edit(self):
from electroncash.bitcoin import seed_type
s = self.get_seed()
b = self.is_seed(s)
if not self.is_bip39:
t = seed_type(s)
label = _('Seed Type') + ': ' + t if t else ''
else:
from electroncash.keystore import bip39_is_checksum_valid
is_checksum, is_wordlist = bip39_is_checksum_valid(s)
status = ('checksum: ' + ('ok' if is_checksum else 'failed')) if is_wordlist else 'unknown wordlist'
label = 'BIP39' + ' (%s)'%status
self.seed_type_label.setText(label)
self.parent.next_button.setEnabled(b)
class KeysLayout(QVBoxLayout):
def __init__(self, parent=None, title=None, is_valid=None, allow_multi=False):
QVBoxLayout.__init__(self)
self.parent = parent
self.is_valid = is_valid
self.text_e = ScanQRTextEdit(allow_multi=allow_multi)
self.text_e.textChanged.connect(self.on_edit)
self.addWidget(WWLabel(title))
self.addWidget(self.text_e)
def get_text(self):
return self.text_e.text()
def on_edit(self):
b = self.is_valid(self.get_text())
self.parent.next_button.setEnabled(b)
class SeedDialog(WindowModalDialog):
def __init__(self, parent, seed, passphrase):
WindowModalDialog.__init__(self, parent, ('Electron Cash - ' + _('Seed')))
self.setMinimumWidth(400)
vbox = QVBoxLayout(self)
title = _("Your wallet generation seed is:")
slayout = SeedLayout(title=title, seed=seed, msg=True, passphrase=passphrase, editable=False)
vbox.addLayout(slayout)
vbox.addLayout(Buttons(CloseButton(self)))
| 41.559471
| 144
| 0.605152
|
a3585050425c0fe3ef7b814381a88b5d7e4cf123
| 26,658
|
py
|
Python
|
ext/platypus/build/lib.linux-x86_64-2.7/runner.py
|
mshabbirhasan/Genesis-indel
|
a62ad7b53ed4621b2a0f27ca7637e16a964d31ca
|
[
"MIT"
] | 5
|
2019-08-30T06:57:57.000Z
|
2019-09-17T14:51:03.000Z
|
ext/platypus/runner.py
|
mshabbirhasan/Genesis-indel
|
a62ad7b53ed4621b2a0f27ca7637e16a964d31ca
|
[
"MIT"
] | null | null | null |
ext/platypus/runner.py
|
mshabbirhasan/Genesis-indel
|
a62ad7b53ed4621b2a0f27ca7637e16a964d31ca
|
[
"MIT"
] | 1
|
2019-09-17T10:49:31.000Z
|
2019-09-17T10:49:31.000Z
|
"""
Code for identifying variants in illumina reads, based on Gerton's haplotype realignment
algorithm and initial implementation.
"""
from __future__ import division
import multiprocessing
import variantcaller
import extendedoptparse
import os
import random
import heapq
import math
import ast
import logging
import filez
import logging.handlers
import platypusutils
from variantcaller import PlatypusSingleProcess
from variantcaller import PlatypusMultiProcess
from platypusutils import open
###################################################################################################
class FileForQueueing(object):
"""
"""
def __init__(self, theFile, line):
"""
Store the file, and initialise the current value
"""
self.theFile = theFile
self.finishedReadingFile = False
self.heap = []
line = line
cols = line.strip().split("\t")
chrom = cols[0]
# Where possible, convert chromosome names into
# integers for sorting. If not possible, use
# original names.
try:
chrom = int(chrom.upper().strip("CHR"))
except:
pass
pos = int(cols[1])
heapq.heappush(self.heap, (chrom, pos, line))
while not self.finishedReadingFile and len(self.heap) < 100:
try:
line = self.theFile.next()
cols = line.strip().split("\t")
chrom = cols[0]
try:
chrom = int(chrom.upper().strip("CHR"))
except:
pass
pos = int(cols[1])
except StopIteration:
self.finishedReadingFile = True
break
heapq.heappush(self.heap, (chrom, pos, line))
# Now take the top line
self.chrom, self.pos, self.line = heapq.heappop(self.heap)
def __cmp__(self, other):
"""
Comparison function. Utilises the comparison function defined in
the AlignedRead class.
"""
return cmp(self.chrom, other.chrom) or cmp(self.pos, other.pos)
def __del__(self):
"""
Destructor
"""
self.theFile.close()
os.remove(self.theFile.name)
def next(self):
"""
Increment the iterator and yield the new value. Also, store the
current value for use in the comparison function.
"""
if not self.finishedReadingFile:
try:
line = self.theFile.next()
cols = line.strip().split("\t")
chrom = cols[0]
# Where possible, convert chromosome names into
# integers for sorting. If not possible, use
# original names.
try:
chrom = int(chrom.upper().strip("CHR"))
except:
pass
pos = int(cols[1])
heapq.heappush(self.heap, (chrom, pos, line))
except StopIteration:
self.finishedReadingFile = True
if len(self.heap) != 0:
# Now take the top line
self.chrom, self.pos, self.line = heapq.heappop(self.heap)
else:
raise StopIteration
###################################################################################################
def regionSort(x, y):
"""
Sort chromosomal regions
"""
chrom1 = x[0]
chrom2 = y[0]
pos1 = int(x[1])
pos2 = int(y[1])
try:
chrom1 = int(chrom1.replace("chr", ""))
chrom2 = int(chrom2.replace("chr", ""))
except ValueError:
pass
return cmp(chrom1, chrom2) or cmp(pos1, pos2)
###################################################################################################
def chromAndPosSort(x, y):
"""
Comparison function for use in sort routines. Compares strings of the form
chr10:0-100. Sorting is done first by chromosome, in alphabetical order, and then
by start position in numerical order.
"""
xChrom = x.split("_")[-1].split(":")[0]
yChrom = y.split("_")[-1].split(":")[0]
xStart = int(x.split(":")[1].split("-")[0])
yStart = int(y.split(":")[1].split("-")[0])
try:
xChrom = int(xChrom.replace("chr", ""))
yChrom = int(yChrom.replace("chr", ""))
except ValueError:
pass
return cmp(xChrom, yChrom) or cmp(xStart, yStart)
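# Illustrative ordering under the comparator above (region names are made up),
# using Python 2's sorted(..., cmp=...):
#
#   sorted(["x_chr10:500-600", "x_chr2:100-200", "x_chr2:50-150"], cmp=chromAndPosSort)
#   # -> ["x_chr2:50-150", "x_chr2:100-200", "x_chr10:500-600"]
#
# because "chr2"/"chr10" are reduced to the integers 2 and 10 before comparison,
# and ties on chromosome fall back to the numeric start coordinate.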
###################################################################################################
def parseTypeFromString(value):
"""
Parse a string representation of a variable into a true, typed, python variable
"""
return ast.literal_eval(value)
###################################################################################################
def parsePlatypusOptionsFromVCFHeader(line):
"""
"""
class fakeClass:
pass
optionsStr = line.split("=")[1].replace("{", "").replace("}","")
theOptions = fakeClass()
for option in optionsStr.split(","):
name,value = option.split(":", 1)
# Get rid of extra quotes and white-space
name = name.strip().strip("'")
value = value.strip()
# Get correct type, and set attribute
value = parseTypeFromString(value)
setattr(theOptions, name, value)
return theOptions
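# Illustrative input for the parser above (values are made up): a VCF meta line
#
#   ##platypusOptions={'bufferSize': 100000, 'nCPU': 1, 'output': 'calls.vcf'}
#
# is reduced to the text after the first '=', stripped of braces, split on ',',
# and each "name : value" pair is evaluated with ast.literal_eval before being
# set as an attribute on the returned options object.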
###################################################################################################
def continueCalling(args):
"""
This function allows the user to re-start Platypus from the partially completed output of
a previous job. This takes a single argument: the VCF file of a previous incomplete job. Platypus
then picks up all the options for the previous job from the VCF header, and restarts calling from the latest
    sensible position (the last integer multiple of --bufferSize on the last chromosome in the VCF).
"""
# Create a logger
logger = logging.getLogger("ATemporaryLog")
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
ch.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
# Seed the Python random number generator
random.seed("Yet acquiescingly I did turn as he pointed: neither pride nor hope rekindling at the end descried, so much as gladness that some end might be.")
parser = extendedoptparse.OptionParser()
parser.add_option("--vcfFile", dest="vcfFile", help="Platypus will start again from the nearest possible co-ordinate to the end of this VCF. This must be a VCF produced by Platypus", action='store', type='string')
(options, args) = parser.parse_args(args)
    newOutputFileName = options.vcfFile.replace(".vcf", "_ContinuedFromFailedProcess.vcf")
logger.info("Platypus will now attempt to finish running a failed process, from the VCF output in file %s" %(options.vcfFile))
logger.info("Complete output (old + new) will go to file %s" %(newOutputFileName))
theVCF = open(options.vcfFile, 'r')
lastLine = None
platypusOptions = None
for line in theVCF:
if "platypusOptions=" in line:
platypusOptions = parsePlatypusOptionsFromVCFHeader(line)
lastLine = line
if platypusOptions is None:
logger.error("Could not parse old platypus options from VCF %s" %(options.vcfFile))
logger.error("Check that VCF file is a valid platypus output file")
logger.error("Quitting now.")
return
cols = lastLine.strip().split("\t")
lastChrom = cols[0]
realLastPos = int(cols[1]) - 1
lastPos = (realLastPos//platypusOptions.bufferSize)*platypusOptions.bufferSize
if platypusOptions.nCPU != 1:
logger.error("Platypus can only currently continue from single process jobs")
logger.error("The VCF you specified was produced from a multi-process Platypus job (--nCPU != 1).")
logger.error("Quitting now.")
logger.info("Previous job failed at %s:%s. Job will be re-run from %s:%s" %(lastChrom,realLastPos,lastChrom,lastPos))
allRegions = sorted(platypusutils.getRegions(platypusOptions), cmp=regionSort)
theIndex = -1
for index,region in enumerate(allRegions):
if region[0] == lastChrom and region[2] == lastPos:
theIndex = index + 1
if theIndex == -1:
raise StandardError, "Could not find region which was unfinished in input VCF"
logger.info("Platypus will continue calling. Output will go to file %s." %(options.vcfFile))
doneRegions = allRegions[:theIndex]
doneChroms = set([x[0] for x in doneRegions if x[0] != lastChrom])
# Reset input VCF file
theVCF.seek(0,0)
# Make new file to store complete output
outputVCF = open(newOutputFileName, "w")
# Copy old, unfinished VCF into new VCF
for line in theVCF:
if line[0] == "#":
outputVCF.write(line)
else:
cols = line.split("\t")
chrom = cols[0]
pos = int(cols[1]) - 1
if chrom in doneChroms:
outputVCF.write(line)
elif chrom == lastChrom and pos < lastPos:
outputVCF.write(line)
else:
break
outputVCF.close()
setattr(platypusOptions, "unfinishedRegions", allRegions[theIndex:])
platypusOptions.output = newOutputFileName
runVariantCaller(platypusOptions, continuing=True)
###################################################################################################
def mergeVCFFiles(tempFileNames, finalFileName, log):
"""
"""
log.info("Merging output VCF file(s) into final file %s" %(finalFileName))
# Final output file
outputVCF = open(finalFileName, 'wb')
theHeap = []
# Initialise queue
for index, fileName in enumerate(tempFileNames):
theFile = open(fileName, 'rb')
for line in theFile:
# End of this file
if line[0] == "#":
if index == 0:
outputVCF.write(line)
else:
continue
else:
theFileForQueueing = FileForQueueing(theFile, line)
heapq.heappush(theHeap, theFileForQueueing)
break
# If there are no calls in the temp file, we still want to
# remove it.
else:
theFile.close()
os.remove(fileName)
# Merge-sort the output using a priority queue
while len(theHeap) != 0:
# Get file from heap in right order
nextFile = heapq.heappop(theHeap)
outputVCF.write(nextFile.line)
# Put file back on heap
try:
nextFile.next()
heapq.heappush(theHeap, nextFile)
except StopIteration:
continue
# Close final output file
outputVCF.close()
log.info("Finished merging VCF file(s)")
###################################################################################################
def runVariantCaller(options, continuing=False):
"""
Run the variant caller. If continuing == True, then we are picking up a failed job from
where it left off.
"""
# Seed the Python random number generator
random.seed("Full many a flower is born to blush unseen and waste its sweetness on the desert air")
# Set up basic logging
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
log = logging.getLogger('Log')
fh = None
ch = logging.StreamHandler()
if continuing:
fh = logging.FileHandler(options.logFileName, 'a')
else:
fh = logging.FileHandler(options.logFileName, 'w')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
if options.verbosity == 0:
log.setLevel(logging.DEBUG)
ch.setLevel(logging.ERROR)
fh.setLevel(logging.DEBUG)
elif options.verbosity == 1:
log.setLevel(logging.DEBUG)
ch.setLevel(logging.WARNING)
fh.setLevel(logging.DEBUG)
elif options.verbosity == 2:
log.setLevel(logging.DEBUG)
ch.setLevel(logging.INFO)
fh.setLevel(logging.DEBUG)
elif options.verbosity >= 3:
# Debug goes to file only.
log.setLevel(logging.DEBUG)
ch.setLevel(logging.INFO)
fh.setLevel(logging.DEBUG)
else:
raise StandardError, "Value of 'verbosity' input parameter must be between 0 and 3 inclusive"
log.addHandler(ch)
log.addHandler(fh)
if continuing:
log.info("Continuing variant calling from where we left off.")
else:
log.info("Beginning variant calling")
log.info("Output will go to %s" %(options.output))
regions = None
if continuing:
regions = options.unfinishedRegions
else:
regions = sorted(platypusutils.getRegions(options), cmp=regionSort)
if options.nCPU == 1:
fileName = None
if options.output == "-":
fileName = options.output
else:
fileName = options.output + "_temp_1.gz"
p1 = PlatypusSingleProcess(fileName, options, regions, continuing)
p1.run()
mergeVCFFiles([fileName], options.output, log)
else:
# Create process manager
fileNames = set()
processes = []
regionsForEachProcess = []
# In this case, create all the BAM files here, before splitting into separate processes. The files will be left open until
# the end of the parent process, and all child processes will share the same open files via pointers.
bamFileNames = None
samples = None
samplesByID = None
samplesByBAM = None
bamFiles = None
theLocks = None
for i in range(options.nCPU):
regionsForEachProcess.append([])
for index,region in enumerate(regions):
regionsForEachProcess[index % options.nCPU].append(region)
for index in range(options.nCPU):
#fileName = options.output + "_temp_%s.gz" %(index)
fileName = options.output + "_temp_%s" %(index)
fileNames.add(fileName)
processes.append(PlatypusMultiProcess(fileName, options, regionsForEachProcess[index]))
for process in processes:
process.start()
for process in processes:
process.join()
# Final output file
mergeVCFFiles(fileNames, options.output, log)
# All done. Write a message to the log, so that it's clear when the
# program has actually finished, and not crashed.
log.info("Finished variant calling")
###################################################################################################
def callVariants(args):
"""
Run the Platypus variant-caller, with the specified arguments
"""
parser = extendedoptparse.OptionParser()
# Input data and miscellaneous
parser.add_option("-o", "--output", dest="output", help="Output SNP data file", action='store', type='string', default="AllVariants.vcf")
parser.add_option("--refFile",dest="refFile", help="Fasta file of reference. Index must be in same directory", action='store', type='string', required=True)
parser.add_option("--regions", dest="regions", type="list", help = "region as comma-separated list of chr:start-end, or just list of chr, or nothing", default=None, action = 'store')
parser.add_option("--skipRegionsFile", dest="skipRegionsFile", type="string", help = "region as comma-separated list of chr:start-end, or just list of chr, or nothing", default=None, action = 'store')
parser.add_option("--bamFiles", dest="bamFiles", type="list", help = "Comma-delimited list of bam file names", default=None, required=True)
parser.add_option("--bufferSize", dest="bufferSize", type="int", help = "Data will be buffered in regions of this size", default=100000, required=False)
parser.add_option("--minReads", dest="minReads", help="Minimum number of supporting reads required before a variant candidate will be considered.", action='store', type='int', default=2)
parser.add_option("--maxReads", dest="maxReads", help="Maximium coverage in window", action='store', type='float', default=5000000)
parser.add_option("--verbosity", dest="verbosity", help="Level of logging", action='store', type='int', default=2)
parser.add_option("--maxReadLength", dest="rlen", help="Maximum read length", action='store', type = 'int', default=150)
parser.add_option("--logFileName", dest="logFileName", help="Name of log file", action='store', type='string', default="log.txt")
parser.add_option("--source", dest="sourceFile", help="vcf file(s) to get candidates from", action='store', type='list', default=None)
parser.add_option("--nCPU", dest="nCPU", help="Number of processors to use", action='store', type='int', default=1)
parser.add_option("--parseNCBI", dest="parseNCBI", help="", type=int, action='store', default=0)
parser.add_option("--compressReads", dest="compressReads", help="If this is set to 1, then all reads will be compressed, and decompressd on demand. This will slow things down, but reduce memory usage.", type='int', action='store', default=0)
parser.add_option("--qualBinSize", dest="qualBinSize", help="This sets the granularity used when compressing quality scores. If > 1 then quality compression is lossy", type='int', action='store', default=1)
# Calling Parameters
parser.add_option("--maxSize", dest="maxSize", help="Largest variant to consider", action='store', type='int', default=1500)
parser.add_option("--largeWindows", dest="largeWindows", help="If set to 1, window size can be up to 'maxSize'", action='store', type='int', default=0)
parser.add_option("--maxVariants", dest="maxVariants", help="Maximium variants to consider in a given window", action='store', type='int', default=8)
parser.add_option("--coverageSamplingLevel", dest="coverageSamplingLevel", help="Downsample to this level of coverage when filtering haplotypes in divergent regions.", action='store', type='int', default=30)
parser.add_option("--maxHaplotypes", dest="maxHaplotypes", help="Maximium haplotypes to consider in a given window", action='store', type='int', default=50)
parser.add_option("--skipDifficultWindows", dest="skipDifficultWindows", help="If set to 1, skip windows with > maxVariants candidates", action='store', type='int', default=0)
parser.add_option("--getVariantsFromBAMs", dest="getVariantsFromBAMs", help="If set to TRUE (default), variant candidates will be generated from BAMs as well as any other inputs", action='store', type='int', default=1)
parser.add_option("--genSNPs", dest="genSNPs", help="If set to TRUE (default), SNP candidates will be considered", action='store', type='int', default=1)
parser.add_option("--genIndels", dest="genIndels", help="If set to TRUE (default), Indel candidates will be considered", action='store', type='int', default=1)
parser.add_option("--mergeClusteredVariants", dest="mergeClusteredVariants", help="If set to 1, variant-containing windows which are close together will be merged, resulting in slower, more accurate variant calls in diverse regions", action='store', type='int', default=1)
parser.add_option("--minFlank", dest="minFlank", help="Ignore base-changes closer than minFlank bases to the end of reads. Also, merge SNPs within this distance into MNPs or complex replacements", action='store', type = 'int', default=10)
parser.add_option("--trimReadFlank", dest="trimReadFlank", help="Set base-qualities to 0 within 'trimReadFlank' bases of the end of reads", action='store', type = 'int', default=0)
parser.add_option("--filterVarsByCoverage", dest="filterVarsByCoverage", help="If 1, Platypus filters variants in difficult regions by the number of times each variant is seen.", action='store', type='int', default=1)
parser.add_option("--filteredReadsFrac", dest="filteredReadsFrac", help="If > this fraction of reads are filtered in a given window, the 'badReads filter is triggered.", action='store', type='float', default=0.7)
parser.add_option("--maxVarDist", dest="maxVarDist", help="Max distance between variants to be considered in the same window", action='store', type='int', default=15) # 9 is 1 base longer than the max possible alignment shift
parser.add_option("--minVarDist", dest="minVarDist", help="Min distance allowed between windows", action='store', type='int', default=9) # 9 is 1 base longer than the max possible alignment shift
parser.add_option("--useEMLikelihoods", dest="useEMLikelihoods", help="If 1, likelihoods computed from EM algorithm will be used to call genotypes for each sample, otherwise likelihoods from individual sample will be used.", action='store', type='int', default=0)
parser.add_option("--countOnlyExactIndelMatches", dest="countOnlyExactIndelMatches", help="If 1, only exactly matching indels will be counted in the NV field", action='store', type='int', default=0)
# Assembly parameters
parser.add_option("--assemble", dest="assemble", help="If 1, Cortex will be used to assemble variant candidates for Platypus to call.", action='store', type='int', default=0)
parser.add_option("--assembleAll", dest="assembleAll", help="If 1 then Platypus will assemble all regions.'.", action='store', type='int', default=1)
parser.add_option("--assemblyRegionSize", dest="assemblyRegionSize", help="Size of region to assemble with Cortex", action='store', type='int', default=1500)
parser.add_option("--assembleBadReads", dest="assembleBadReads", help="If 1, then use filtered 'bad' reads for local assembly", action='store', type='int', default=1)
parser.add_option("--assemblerKmerSize", dest="assemblerKmerSize", help="Kmer size to use for cortex assembly'.", action='store', type='int', default=15)
parser.add_option("--assembleBrokenPairs", dest="assembleBrokenPairs", help="If 1, then use broken read pairs for local assembly", action='store', type='int', default=0)
parser.add_option("--noCycles", dest="noCycles", help="If 1, then don't allow cycles in the graph", action='store', type='int', default=0)
# QC Parameters
parser.add_option("--minMapQual", dest="minMapQual", help="Minimum mapping quality of read. Any reads with map qual below this are ignored", action='store', type = 'int', default=20, required=False)
parser.add_option("--minBaseQual", dest="minBaseQual", help="Minimum allowed base-calling quality. Any bases with qual below this are ignored in SNP-calling", action='store', type = 'int', default=20, required=False)
parser.add_option("--minGoodQualBases", dest="minGoodQualBases", help="Min bases per read that must have base-quality >= 20.", action='store', type = 'int', default=20, required=False)
parser.add_option("--filterDuplicates", dest="filterDuplicates", help="If set to 1, duplicate reads will be removed based on the read-pair start and end", action='store', type = 'int', default=1, required=False)
parser.add_option("--filterReadsWithUnmappedMates", dest="filterReadsWithUnmappedMates", help="If set to 1, reads with un-mapped mates will be removed", action='store', type = 'int', default=1, required=False)
parser.add_option("--filterReadsWithDistantMates", dest="filterReadsWithDistantMates", help="If set to 1, reads with mates mapped far away will be removed", action='store', type = 'int', default=1, required=False)
parser.add_option("--filterReadPairsWithSmallInserts", dest="filterReadPairsWithSmallInserts", help="If set to 1, read pairs with insert sizes < one read length will be removed", action='store', type = 'int', default=1, required=False)
parser.add_option("--trimOverlapping", dest="trimOverlapping", help="If set to 1, overlapping paired reads have overlap set to qual 0", action='store', type = 'int', default=1, required=False)
parser.add_option("--trimAdapter", dest="trimAdapter", help="If set to 1, then sets to qual 0 any part of read which exceeds the mapped fragment length. This is mainly useful for trimming adapter sequences", action='store', type = 'int', default=1, required=False)
# Variant-calling Filter Parameters
parser.add_option("--maxGOF", dest="maxGOF", help="Max allowed value for goodness-of-fit test. Higher than this triggers GOF filter (Phred-scaled).", action='store', type='int', default=30)
parser.add_option("--minPosterior", dest="minPosterior", help="Only variants with posterior >= this will be outpu to the VCF. Value is a Phred-score.", action='store', type='int', default=5)
parser.add_option("--sbThreshold", dest="sbThreshold", help="P-value for strand-bias filtering..", action='store', type='float', default=1e-3)
parser.add_option("--scThreshold", dest="scThreshold", help="Cut-off for SC filter.", action='store', type='float', default=0.95)
parser.add_option("--abThreshold", dest="abThreshold", help="P-value for allele-bias filtering..", action='store', type='float', default=1e-3)
parser.add_option("--minVarFreq", dest="minVarFreq", help="Variants below this frequency will be flagged as allele-biased", action='store', type='float', default=0.05)
parser.add_option("--badReadsWindow", dest="badReadsWindow", help="Size of window around variant to look for low-quality bases.", action='store', type='int', default=11)
parser.add_option("--badReadsThreshold", dest="badReadsThreshold", help="Variants where the median minimum quality in a window of badReadsWindow around the variant position falls below this value will be filtered with the flag 'badReads'.", action='store', type='int', default=15)
parser.add_option("--rmsmqThreshold", dest="rmsmqThreshold", help="RMSMQ filter triggers when root-mean-square mapping quality across region containing variant is below this.", action='store', type='int', default=40)
parser.add_option("--qdThreshold", dest="qdThreshold", help="QD filter triggers quality/depth for variant is below this.", action='store', type='int', default=10)
parser.add_option("--hapScoreThreshold", dest="hapScoreThreshold", help="HapScore filter triggers HapScore for variant is above this.", action='store', type='int', default=4)
# Genome VCF parameters
parser.add_option("--outputRefCalls", dest="outputRefCalls", help="If 1, output block reference calls.", action='store', type='int', default=0)
(options, args) = parser.parse_args(args)
runVariantCaller(options)
###################################################################################################
| 48.557377
| 284
| 0.643259
|
fdaaf2a397db80e8732f351c90989fe01ca93c49
| 2,006
|
py
|
Python
|
tests/test_render_mail.py
|
kunxianhuang/mail_handler
|
8a84e6eeedf1bfe4083bf49735891eecc12c3848
|
[
"MIT"
] | null | null | null |
tests/test_render_mail.py
|
kunxianhuang/mail_handler
|
8a84e6eeedf1bfe4083bf49735891eecc12c3848
|
[
"MIT"
] | null | null | null |
tests/test_render_mail.py
|
kunxianhuang/mail_handler
|
8a84e6eeedf1bfe4083bf49735891eecc12c3848
|
[
"MIT"
] | null | null | null |
import glob
from click.testing import CliRunner
from mail_handler.render_mail import main
from tests.utils import (
compare_rendered_mail_all,
get_all_mail_names_from_path,
path_mails_to_send_no_separator,
path_mails_to_send_with_separator,
)
path_j2 = "./templates/sponsorship/spam_sponsors_2020.j2"
path_receivers_json = "./examples/sponsorship/spam_sponsors_2020.json"
path_pre_rendered_mails_no_separator = "./tests/data/no-separator"
path_pre_rendered_mails_with_separator = "./tests/data/with-separator"
def test_rendered_mail_no_separator(all_mails_base_no_separator):
runner = CliRunner()
result = runner.invoke(
main,
[
path_j2,
path_receivers_json,
"--output_path",
path_mails_to_send_no_separator,
],
)
all_mails_target = get_all_mail_names_from_path(
glob.glob("/".join((path_mails_to_send_no_separator, "*@*")))
)
assert result.exit_code == 0
assert len(all_mails_base_no_separator) == len(all_mails_target)
assert compare_rendered_mail_all(
all_mails_target,
base_prefix=path_pre_rendered_mails_no_separator,
target_prefix=path_mails_to_send_no_separator,
)
def test_rendered_mail_with_separator_dash(all_mails_base_with_separator):
runner = CliRunner()
result = runner.invoke(
main,
[
path_j2,
path_receivers_json,
"--output_path",
path_mails_to_send_with_separator,
"--separator",
" - ",
],
)
all_mails_target = get_all_mail_names_from_path(
glob.glob("/".join((path_mails_to_send_with_separator, "*@*")))
)
assert result.exit_code == 0
assert len(all_mails_base_with_separator) == len(all_mails_target)
assert compare_rendered_mail_all(
all_mails_target,
base_prefix=path_pre_rendered_mails_with_separator,
target_prefix=path_mails_to_send_with_separator,
)
| 29.072464
| 74
| 0.698903
|
fe624208883847b2782c1d79d06367ff243ee98d
| 50
|
py
|
Python
|
repertoire/term.py
|
agajews/repertoire
|
239a24c416a145c77d9d2ee0ff5380f1246c6159
|
[
"MIT"
] | 3
|
2019-02-17T17:13:23.000Z
|
2020-04-07T14:47:40.000Z
|
repertoire/term.py
|
agajews/repertoire
|
239a24c416a145c77d9d2ee0ff5380f1246c6159
|
[
"MIT"
] | null | null | null |
repertoire/term.py
|
agajews/repertoire
|
239a24c416a145c77d9d2ee0ff5380f1246c6159
|
[
"MIT"
] | null | null | null |
from blessings import Terminal
term = Terminal()
| 12.5
| 30
| 0.78
|
1fa9f82954e034138a8c88c0f7df96273747eaf1
| 3,227
|
py
|
Python
|
ooiservices/app/uframe/subscribe.py
|
asascience-open/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 2
|
2015-02-28T00:20:30.000Z
|
2015-04-30T12:40:31.000Z
|
ooiservices/app/uframe/subscribe.py
|
asascience-open/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 266
|
2015-01-02T21:29:25.000Z
|
2020-01-23T16:00:11.000Z
|
ooiservices/app/uframe/subscribe.py
|
oceanobservatories/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 13
|
2015-02-04T21:13:34.000Z
|
2016-10-18T14:39:36.000Z
|
#!/usr/bin/env python
'''
Subscription services for uframe.
'''
__author__ = 'M@Campbell'
__created__ = '11/04/2015'
from flask import request, current_app as app
from ooiservices.app.uframe import uframe as api
from ooiservices.app.main.authentication import auth
from ooiservices.app.main.errors import bad_request
from ooiservices.app.uframe.config import get_uframe_timeout_info
import requests
from requests.exceptions import ConnectionError, Timeout
headers = {'Content-Type': 'application/json'}
@auth.login_required
@api.route('/subscription', methods=['GET'])
def get_subscription():
"""
"""
debug = False
try:
if debug: print '\n debug -- get_subscription...'
# Get uframe connect and timeout information
timeout, timeout_read = get_uframe_timeout_info()
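        # Note: requests treats a (connect, read) tuple as two separate limits --
        # the first bounds establishing the connection, the second bounds waiting
        # for response data.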
if request.args is not None:
if debug: print '\n debug -- request.args....'
res = requests.get(
app.config['UFRAME_SUBSCRIBE_URL']+'/subscription',
params=request.args,
timeout=(timeout, timeout_read))
else:
if debug: print '\n debug -- No request.args....'
res = requests.get(app.config['UFRAME_SUBSCRIBE_URL']+'/subscription', timeout=(timeout, timeout_read))
return res.text, res.status_code
except ConnectionError:
message = 'ConnectionError getting subscription.'
return bad_request(message)
except Timeout:
message = 'Timeout getting subscription.'
return bad_request(message)
except Exception as err:
message = str(err)
return bad_request(message)
@auth.login_required
@api.route('/subscription', methods=['POST'])
def create_subscription():
try:
# Get uframe connect and timeout information
timeout, timeout_read = get_uframe_timeout_info()
res = requests.post(
app.config['UFRAME_SUBSCRIBE_URL']+'/subscription',
data=request.data,
headers=headers,
timeout=(timeout, timeout_read))
return res.text, res.status_code
except ConnectionError:
message = 'ConnectionError getting subscription.'
return bad_request(message)
except Timeout:
message = 'Timeout getting subscription.'
return bad_request(message)
except Exception as err:
message = str(err)
return bad_request(message)
@auth.login_required
@api.route('/subscription/<int:id>', methods=['DELETE'])
def delete_subscription(id):
try:
# Get uframe connect and timeout information
timeout, timeout_read = get_uframe_timeout_info()
res = requests.delete(
app.config['UFRAME_SUBSCRIBE_URL']+'/subscription/%s' % id,
timeout=(timeout, timeout_read))
return res.text, res.status_code
except ConnectionError:
message = 'ConnectionError getting subscription.'
return bad_request(message)
except Timeout:
message = 'Timeout getting subscription.'
return bad_request(message)
except Exception as err:
message = str(err)
return bad_request(message)
| 32.928571
| 115
| 0.650449
|
01f53b60add151dc0f8154c1139f578f6050a07d
| 1,544
|
py
|
Python
|
enaml/socket_interface.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 11
|
2015-01-04T14:29:23.000Z
|
2019-12-25T05:38:37.000Z
|
enaml/socket_interface.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 36
|
2015-02-20T00:56:53.000Z
|
2020-12-04T10:02:14.000Z
|
enaml/socket_interface.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 3
|
2015-11-19T15:11:37.000Z
|
2019-03-11T23:45:02.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
class ActionSocketInterface(object):
""" An abstract base class defining an action socket interface.
Concrete implementations of this interface can be used by Session
    instances to send and receive messages to and from their client
objects.
"""
__metaclass__ = ABCMeta
@abstractmethod
def on_message(self, callback):
""" Register a callback for receiving messages sent by a
client object.
Parameters
----------
callback : callable
A callable with an argument signature that is equivalent to
the `send` method. If the callback is a bound method, then
the lifetime of the callback will be bound to lifetime of
the method owner object.
"""
raise NotImplementedError
@abstractmethod
def send(self, object_id, action, content):
""" Send an action to the client of an object.
Parameters
----------
object_id : str
The object id for the Object sending the message.
action : str
The action that should be take by the client object.
content : dict
The dictionary of content needed to perform the action.
"""
raise NotImplementedError
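# --- Illustrative only: a minimal loopback implementation of the interface above.
# The class name and its buffering behaviour are hypothetical; the only contract
# assumed is that `send(object_id, action, content)` is forwarded to whatever
# callable was registered through `on_message`.
class LoopbackActionSocket(ActionSocketInterface):
    """ An in-process action socket that hands sent actions straight to the
    registered callback, buffering them until a callback is available.
    """
    def __init__(self):
        self._callback = None
        self._pending = []
    def on_message(self, callback):
        # Register the receiver and flush anything sent before registration.
        self._callback = callback
        while self._pending:
            self._callback(*self._pending.pop(0))
    def send(self, object_id, action, content):
        # Deliver immediately when possible; otherwise queue for later delivery.
        if self._callback is not None:
            self._callback(object_id, action, content)
        else:
            self._pending.append((object_id, action, content))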
| 29.692308
| 79
| 0.56671
|
e6cf776932e7c55e72c50bff578bfa41764550f5
| 797
|
py
|
Python
|
OpenCV/p5.1.py
|
iamprasann/Autumn-of-Automation
|
e2e20e004524a2710c3a5b2e1b46b023106c0adb
|
[
"MIT"
] | null | null | null |
OpenCV/p5.1.py
|
iamprasann/Autumn-of-Automation
|
e2e20e004524a2710c3a5b2e1b46b023106c0adb
|
[
"MIT"
] | null | null | null |
OpenCV/p5.1.py
|
iamprasann/Autumn-of-Automation
|
e2e20e004524a2710c3a5b2e1b46b023106c0adb
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
cap = cv2.VideoCapture('messi.mp4')
while True:
    ret, frame = cap.read()
    if not ret:
        # Stop cleanly when the video ends or a frame cannot be read.
        break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_blurred = cv2.blur(gray, (3, 3))
detected_circles = cv2.HoughCircles(gray_blurred, cv2.HOUGH_GRADIENT, 1, 20, param1 = 50, param2 = 30, minRadius = 1, maxRadius = 40)
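    # HoughCircles arguments above, in order: dp=1 (accumulator at full image
    # resolution), a minimum distance of 20 px between detected centres, param1=50
    # (upper Canny edge threshold), param2=30 (accumulator threshold; smaller values
    # return more, possibly spurious, circles), and a 1-40 px radius search range.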
if detected_circles is not None:
detected_circles = np.uint16(np.around(detected_circles))
for pt in detected_circles[0, :]:
a, b, r = pt[0], pt[1], pt[2]
cv2.circle(frame, (a, b), r, (0, 255, 0), 2)
cv2.circle(frame, (a, b), 1, (0, 0, 255), 3)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
cv2.imshow("Detected", frame)
cap.release()
cv2.destroyAllWindows()
| 30.653846
| 138
| 0.579674
|
474d90e9f108fdb0785b27573430a07b44e93e45
| 2,815
|
py
|
Python
|
azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py
|
onap/archive-multicloud-azure
|
d3562b78879a900510c8ecb1241156d5b9bb50f6
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2018-10-13T06:32:10.000Z
|
2018-10-13T06:32:10.000Z
|
azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py
|
onap/archive-multicloud-azure
|
d3562b78879a900510c8ecb1241156d5b9bb50f6
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py
|
onap/archive-multicloud-azure
|
d3562b78879a900510c8ecb1241156d5b9bb50f6
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..service_templates import (consume_use_case, consume_node_cellar)
# Use Cases
def test_use_case_compute_1():
consume_use_case('compute-1', 'instance')
def test_use_case_software_component_1():
consume_use_case('software-component-1', 'instance')
def test_use_case_block_storage_1():
consume_use_case('block-storage-1', 'instance')
def test_use_case_block_storage_2():
consume_use_case('block-storage-2', 'instance')
def test_use_case_block_storage_3():
consume_use_case('block-storage-3', 'instance')
def test_use_case_block_storage_4():
consume_use_case('block-storage-4', 'instance')
def test_use_case_block_storage_5():
consume_use_case('block-storage-5', 'instance')
def test_use_case_block_storage_6():
consume_use_case('block-storage-6', 'instance')
def test_use_case_object_storage_1():
consume_use_case('object-storage-1', 'instance')
def test_use_case_network_1():
consume_use_case('network-1', 'instance')
def test_use_case_network_2():
consume_use_case('network-2', 'instance')
def test_use_case_network_3():
consume_use_case('network-3', 'instance')
def test_use_case_network_4():
consume_use_case('network-4', 'instance')
def test_use_case_webserver_dbms_1():
consume_use_case('webserver-dbms-1', 'template')
def test_use_case_webserver_dbms_2():
consume_use_case('webserver-dbms-2', 'instance')
def test_use_case_multi_tier_1():
consume_use_case('multi-tier-1', 'instance')
def test_use_case_container_1():
consume_use_case('container-1', 'template')
# NodeCellar
def test_node_cellar_validation():
consume_node_cellar('validate')
def test_node_cellar_validation_no_cache():
consume_node_cellar('validate', False)
def test_node_cellar_presentation():
consume_node_cellar('presentation')
def test_node_cellar_model():
consume_node_cellar('template')
def test_node_cellar_types():
consume_node_cellar('types')
def test_node_cellar_instance():
consume_node_cellar('instance')
| 24.911504
| 74
| 0.765542
|
a5aae19a7a649675f6b4ef4585064e4c0ee9e0f4
| 666
|
py
|
Python
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryBatchSubInDTO.py
|
yuanyi-thu/AIOT-
|
27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e
|
[
"BSD-3-Clause"
] | 128
|
2018-10-29T04:11:47.000Z
|
2022-03-07T02:19:14.000Z
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryBatchSubInDTO.py
|
yuanyi-thu/AIOT-
|
27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e
|
[
"BSD-3-Clause"
] | 40
|
2018-11-02T00:40:48.000Z
|
2021-12-07T09:33:56.000Z
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryBatchSubInDTO.py
|
yuanyi-thu/AIOT-
|
27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e
|
[
"BSD-3-Clause"
] | 118
|
2018-10-29T08:43:57.000Z
|
2022-01-07T06:49:25.000Z
|
class QueryBatchSubInDTO(object):
def __init__(self):
self.appId = None
self.notifyType = None
self.pageNo = None
self.pageSize = None
def getAppId(self):
return self.appId
def setAppId(self, appId):
self.appId = appId
def getNotifyType(self):
return self.notifyType
def setNotifyType(self, notifyType):
self.notifyType = notifyType
def getPageNo(self):
return self.pageNo
def setPageNo(self, pageNo):
self.pageNo = pageNo
def getPageSize(self):
return self.pageSize
def setPageSize(self, pageSize):
self.pageSize = pageSize
| 20.8125
| 40
| 0.623123
|
900ef541f2a875f3ff68caaa171ac18331a1f73e
| 693
|
py
|
Python
|
ocs_ci/framework/testlib.py
|
anubhav-here/ocs-ci
|
cf922411c721922f58d852167ff9ab8a8a6e15a6
|
[
"MIT"
] | null | null | null |
ocs_ci/framework/testlib.py
|
anubhav-here/ocs-ci
|
cf922411c721922f58d852167ff9ab8a8a6e15a6
|
[
"MIT"
] | null | null | null |
ocs_ci/framework/testlib.py
|
anubhav-here/ocs-ci
|
cf922411c721922f58d852167ff9ab8a8a6e15a6
|
[
"MIT"
] | null | null | null |
import pytest
from ocs_ci.framework.pytest_customization.marks import * # noqa: F403
@pytest.mark.usefixtures( # noqa: F405
'run_io_in_background', 'environment_checker'
)
class BaseTest:
"""
Base test class for our testing.
If some functionality/property needs to be implemented in all test classes
here is the place to put your code.
"""
pass
@e2e # noqa: F405
class E2ETest(BaseTest):
"""
Base class for E2E team
"""
pass
@manage # noqa: F405
class ManageTest(BaseTest):
"""
    Base class for Manage team
"""
pass
@ecosystem # noqa: F405
class EcosystemTest(BaseTest):
"""
    Base class for Ecosystem team
"""
pass
| 17.325
| 78
| 0.650794
|
6f1bda3f3a0c06827053dbdc1836a56f9996c0cd
| 1,252
|
py
|
Python
|
tests/zxcvbn_test.py
|
robotmlg/zxcvbn-python
|
db37117c3e8a6217047c12e00baa05cb14bed0f8
|
[
"MIT"
] | 1
|
2021-11-26T10:29:29.000Z
|
2021-11-26T10:29:29.000Z
|
tests/zxcvbn_test.py
|
robotmlg/zxcvbn-python
|
db37117c3e8a6217047c12e00baa05cb14bed0f8
|
[
"MIT"
] | null | null | null |
tests/zxcvbn_test.py
|
robotmlg/zxcvbn-python
|
db37117c3e8a6217047c12e00baa05cb14bed0f8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from zxcvbn import zxcvbn
def test_unicode_user_inputs():
# test Issue #12 -- don't raise a UnicodeError with unicode user_inputs or
# passwords.
input_ = u'Фамилия'
password = u'pÄssword junkiË'
zxcvbn(password, user_inputs=[input_])
def test_invalid_user_inputs():
# don't raise an error with non-string types for user_inputs
input_ = None
password = u'pÄssword junkiË'
zxcvbn(password, user_inputs=[input_])
def test_long_password():
input_ = None
password = "weopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioejiojweopiopdsjmkldjvoisdjfioej"
zxcvbn(password, user_inputs=[input_])
def test_dictionary_password():
# return the correct error message for a english match
input_ = None
password = "musculature"
result = zxcvbn(password, user_inputs=[input_])
assert result["feedback"]["warning"] == \
"A word by itself is easy to guess.", \
"Gives specific error for single-word passwords"
| 32.102564
| 333
| 0.757188
|
7a7eacff749935692f0fdf2752cd76e992399dda
| 4,529
|
py
|
Python
|
hooks/_gh_actions_expand.py
|
andersjohansson/zotero-better-bibtex
|
240713522ae83d3df8f08dfd204706c4f96278b7
|
[
"MIT"
] | 1
|
2022-03-25T03:21:34.000Z
|
2022-03-25T03:21:34.000Z
|
hooks/_gh_actions_expand.py
|
send2dan/zotero-better-bibtex
|
0d38ddb09b4dca42be81729886b40c8c743c8543
|
[
"MIT"
] | null | null | null |
hooks/_gh_actions_expand.py
|
send2dan/zotero-better-bibtex
|
0d38ddb09b4dca42be81729886b40c8c743c8543
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright © 2020 Elijah Shaw-Rutschman
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Interpolates aliases in YAML.
Reads YAML from standard input, writes YAML to standard output. Comments and
formatting are retained.
This is useful if, for instance, you want to use aliases with a platform
that does not support them, such as Github Actions. You can define your anchors
in a top-level _anchors section, and that section will be omitted from the
output. The script can either be run manually prior to committing your changes,
or can be automated via a git hook.
Example input:
_anchors:
checkout_repo: &checkout_repo
name: Checkout repo
uses: actions/checkout@v2
with:
fetch-depth: 1
submodules: recursive
install_pip_requirements: &install_pip_requirements
name: Install pip requirements
run: |
pip install -r requirements.txt
name: Build
on:
push:
branches: [ '*' ]
tags: [ '*' ]
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- *checkout_repo
- *install_pip_requirements
The output would be:
name: Build
on: null
push:
branches: ['*']
tags: ['*']
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v2
with:
fetch-depth: 1
submodules: recursive
- name: Install pip requirements
run: |
pip install -r requirements.txt
"""
import os
import sys
from glob import glob
from ruamel import yaml
import subprocess
import json
class InterpolatingDumper(yaml.RoundTripDumper):
def ignore_aliases(self, data):
# Always interpolate aliases
return True
def interpolate_aliases(in_stream, out_stream):
data = yaml.load(in_stream, Loader=yaml.RoundTripLoader)
if '_anchors' in data:
# Remove top-level _anchors section
del data['_anchors']
out_stream.write(yaml.round_trip_dump(data, Dumper=InterpolatingDumper, width=5000))
def run(cmd):
print('$ ' + ' '.join(cmd))
result = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8')
print(result)
return result
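# `git diff --cached --name-status` prints one tab-separated record per staged
# change, e.g. (paths are illustrative):
#
#   M<TAB>.github/workflows/src/build.yaml
#   R100<TAB>.github/workflows/src/old.yaml<TAB>.github/workflows/src/new.yaml
#
# The first character is the status letter (M/A/D/R...); renames carry a
# similarity score plus both the old and the new path, hence the 'R' special
# case in the loop below.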
for change in run('git diff --cached --name-status'.split(' ')).split('\n'):
if change.strip() == '': continue
if change[0] == 'R':
status, path, new = change.split("\t")
status = status[0]
else:
status, path = change.split("\t")
if os.path.dirname(path) == '.github/workflows':
src = os.path.join(os.path.dirname(path), 'src', os.path.basename(path))
if os.path.exists(src) and os.path.getmtime(path) > os.path.getmtime(src):
print(f'{src} is older than {path}')
sys.exit(1)
if os.path.dirname(path) != '.github/workflows/src': continue
src = path
tgt = os.path.join(os.path.dirname(os.path.dirname(src)), os.path.basename(src))
action = None
if status in ['M', 'A']:
action = ['git', 'add', tgt]
elif status == 'D':
action = 'rm'
if os.path.exists(tgt): action = ['git', 'rm', tgt]
elif status == 'R':
old_tgt = tgt
src = new
tgt = os.path.join(os.path.dirname(os.path.dirname(src)), os.path.basename(src))
if os.path.exists(tgt): os.remove(tgt)
if os.path.exists(old_tgt): run(['git', 'mv', old_tgt, tgt])
action = ['git', 'add', tgt]
assert tgt != src
print(src, '=>', tgt)
with open(src) as in_stream, open(tgt, 'w') as out_stream:
interpolate_aliases(in_stream, out_stream)
if action:
run(action)
| 31.894366
| 86
| 0.684478
|
fa9330eccb6d317f578276afbe89855a14af27a8
| 495
|
py
|
Python
|
LinkedListImpPython.py
|
tonymongare/DataStructures
|
76f05100eb223bf72ce3e3c3e3aa126945053b75
|
[
"MIT"
] | 2
|
2021-09-25T09:09:47.000Z
|
2022-03-16T09:08:50.000Z
|
LinkedListImpPython.py
|
tonymongare/DataStructures
|
76f05100eb223bf72ce3e3c3e3aa126945053b75
|
[
"MIT"
] | null | null | null |
LinkedListImpPython.py
|
tonymongare/DataStructures
|
76f05100eb223bf72ce3e3c3e3aa126945053b75
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, item):
self.item = item
self.next = None
class LinkedList:
def __init__(self):
self.head = None
if __name__ == '__main__':
linked_list = LinkedList()
linked_list.head = Node('A')
second = Node('B')
third = Node('C')
linked_list.head.next = second
second.next = third
    # Traverse with a separate cursor so the list itself is left intact.
    node = linked_list.head
    while node != None:
        print(node.item, end=" ")
        node = node.next
| 17.678571
| 48
| 0.60404
|
71115739840eab17c75aa649c5dbf000bdaf2e2e
| 1,225
|
py
|
Python
|
byceps/services/board/dbmodels/last_topic_view.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 23
|
2015-08-03T23:28:54.000Z
|
2018-12-12T20:11:45.000Z
|
byceps/services/board/dbmodels/last_topic_view.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 1
|
2018-09-30T18:18:24.000Z
|
2018-09-30T18:18:24.000Z
|
byceps/services/board/dbmodels/last_topic_view.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 9
|
2015-08-06T16:41:36.000Z
|
2018-09-25T11:17:31.000Z
|
"""
byceps.services.board.dbmodels.last_topic_view
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from ....database import db
from ....typing import UserID
from ....util.instances import ReprBuilder
from ..transfer.models import TopicID
from .topic import Topic
class LastTopicView(db.Model):
"""The last time a user looked into specific topic."""
__tablename__ = 'board_topics_lastviews'
user_id = db.Column(db.Uuid, db.ForeignKey('users.id'), primary_key=True)
topic_id = db.Column(db.Uuid, db.ForeignKey('board_topics.id'), primary_key=True)
topic = db.relationship(Topic)
occurred_at = db.Column(db.DateTime, nullable=False)
def __init__(
self, user_id: UserID, topic_id: TopicID, occurred_at: datetime
) -> None:
self.user_id = user_id
self.topic_id = topic_id
self.occurred_at = occurred_at
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_with_lookup('user_id') \
.add('topic', self.topic.title) \
.add_with_lookup('occurred_at') \
.build()
| 28.488372
| 85
| 0.652245
|
77e2e9043a0e4285ce2325058e006751a45527e7
| 221
|
py
|
Python
|
test/normalizer_issue_files/E73.py
|
bryanforbes/parso
|
93206f6eba3fe29a200c328c11535e58c56e9635
|
[
"PSF-2.0"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
test/normalizer_issue_files/E73.py
|
bryanforbes/parso
|
93206f6eba3fe29a200c328c11535e58c56e9635
|
[
"PSF-2.0"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
test/normalizer_issue_files/E73.py
|
bryanforbes/parso
|
93206f6eba3fe29a200c328c11535e58c56e9635
|
[
"PSF-2.0"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
#: E731:4
f = lambda x: 2 * x
while False:
#: E731:10
foo = lambda y, z: 2 * x
# Okay
f = object()
f.method = lambda: 'Method'
f = {}
f['a'] = lambda x: x ** 2
f = []
f.append(lambda x: x ** 2)
lambda: 'no-op'
| 13
| 28
| 0.511312
|
34cd2415a54643aad58144df8022e646df2633c3
| 10,243
|
py
|
Python
|
moto/medialive/models.py
|
richford/moto
|
3b9e6261f9d0f816bae76547c42acf4f16a36482
|
[
"Apache-2.0"
] | 12
|
2018-07-03T15:02:48.000Z
|
2021-11-09T09:34:57.000Z
|
moto/medialive/models.py
|
richford/moto
|
3b9e6261f9d0f816bae76547c42acf4f16a36482
|
[
"Apache-2.0"
] | 33
|
2019-08-30T14:36:03.000Z
|
2022-02-04T10:23:49.000Z
|
moto/medialive/models.py
|
richford/moto
|
3b9e6261f9d0f816bae76547c42acf4f16a36482
|
[
"Apache-2.0"
] | 26
|
2018-05-09T08:29:37.000Z
|
2021-12-27T13:46:05.000Z
|
from collections import OrderedDict
from uuid import uuid4
from boto3 import Session
from moto.core import BaseBackend, BaseModel
class Input(BaseModel):
def __init__(self, *args, **kwargs):
self.arn = kwargs.get("arn")
self.attached_channels = kwargs.get("attached_channels", [])
self.destinations = kwargs.get("destinations", [])
self.input_id = kwargs.get("input_id")
self.input_class = kwargs.get("input_class", "STANDARD")
self.input_devices = kwargs.get("input_devices", [])
self.input_source_type = kwargs.get("input_source_type", "STATIC")
self.media_connect_flows = kwargs.get("media_connect_flows", [])
self.name = kwargs.get("name")
self.role_arn = kwargs.get("role_arn")
self.security_groups = kwargs.get("security_groups", [])
self.sources = kwargs.get("sources", [])
# Possible states: 'CREATING'|'DETACHED'|'ATTACHED'|'DELETING'|'DELETED'
self.state = kwargs.get("state")
self.tags = kwargs.get("tags")
self.input_type = kwargs.get("input_type")
def to_dict(self):
data = {
"arn": self.arn,
"attachedChannels": self.attached_channels,
"destinations": self.destinations,
"id": self.input_id,
"inputClass": self.input_class,
"inputDevices": self.input_devices,
"inputSourceType": self.input_source_type,
"mediaConnectFlows": self.media_connect_flows,
"name": self.name,
"roleArn": self.role_arn,
"securityGroups": self.security_groups,
"sources": self.sources,
"state": self.state,
"tags": self.tags,
"type": self.input_type,
}
return data
def _resolve_transient_states(self):
# Resolve transient states before second call
# (to simulate AWS taking its sweet time with these things)
if self.state in ["CREATING"]:
self.state = "DETACHED" # or ATTACHED
elif self.state == "DELETING":
self.state = "DELETED"
class Channel(BaseModel):
def __init__(self, *args, **kwargs):
self.arn = kwargs.get("arn")
self.cdi_input_specification = kwargs.get("cdi_input_specification")
self.channel_class = kwargs.get("channel_class", "STANDARD")
self.destinations = kwargs.get("destinations")
self.egress_endpoints = kwargs.get("egress_endpoints", [])
self.encoder_settings = kwargs.get("encoder_settings")
self.channel_id = kwargs.get("channel_id")
self.input_attachments = kwargs.get("input_attachments")
self.input_specification = kwargs.get("input_specification")
self.log_level = kwargs.get("log_level")
self.name = kwargs.get("name")
self.pipeline_details = kwargs.get("pipeline_details", [])
self.role_arn = kwargs.get("role_arn")
self.state = kwargs.get("state")
self.tags = kwargs.get("tags")
self._previous_state = None
def to_dict(self, exclude=None):
data = {
"arn": self.arn,
"cdiInputSpecification": self.cdi_input_specification,
"channelClass": self.channel_class,
"destinations": self.destinations,
"egressEndpoints": self.egress_endpoints,
"encoderSettings": self.encoder_settings,
"id": self.channel_id,
"inputAttachments": self.input_attachments,
"inputSpecification": self.input_specification,
"logLevel": self.log_level,
"name": self.name,
"pipelineDetails": self.pipeline_details,
"pipelinesRunningCount": 1
if self.channel_class == "SINGLE_PIPELINE"
else 2,
"roleArn": self.role_arn,
"state": self.state,
"tags": self.tags,
}
if exclude:
for key in exclude:
del data[key]
return data
def _resolve_transient_states(self):
# Resolve transient states before second call
# (to simulate AWS taking its sweet time with these things)
if self.state in ["CREATING", "STOPPING"]:
self.state = "IDLE"
elif self.state == "STARTING":
self.state = "RUNNING"
elif self.state == "DELETING":
self.state = "DELETED"
elif self.state == "UPDATING":
self.state = self._previous_state
self._previous_state = None
class MediaLiveBackend(BaseBackend):
def __init__(self, region_name=None):
super(MediaLiveBackend, self).__init__()
self.region_name = region_name
self._channels = OrderedDict()
self._inputs = OrderedDict()
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_channel(
self,
cdi_input_specification,
channel_class,
destinations,
encoder_settings,
input_attachments,
input_specification,
log_level,
name,
request_id,
reserved,
role_arn,
tags,
):
channel_id = uuid4().hex
arn = "arn:aws:medialive:channel:{}".format(channel_id)
channel = Channel(
arn=arn,
cdi_input_specification=cdi_input_specification,
channel_class=channel_class or "STANDARD",
destinations=destinations,
egress_endpoints=[],
encoder_settings=encoder_settings,
channel_id=channel_id,
input_attachments=input_attachments,
input_specification=input_specification,
log_level=log_level,
name=name,
pipeline_details=[],
role_arn=role_arn,
state="CREATING",
tags=tags,
)
self._channels[channel_id] = channel
return channel
def list_channels(self, max_results, next_token):
channels = list(self._channels.values())
if max_results is not None:
channels = channels[:max_results]
response_channels = [
c.to_dict(exclude=["encoderSettings", "pipelineDetails"]) for c in channels
]
return response_channels, next_token
def describe_channel(self, channel_id):
channel = self._channels[channel_id]
channel._resolve_transient_states()
return channel.to_dict()
def delete_channel(self, channel_id):
channel = self._channels[channel_id]
channel.state = "DELETING"
return channel.to_dict()
def start_channel(self, channel_id):
channel = self._channels[channel_id]
channel.state = "STARTING"
return channel.to_dict()
def stop_channel(self, channel_id):
channel = self._channels[channel_id]
channel.state = "STOPPING"
return channel.to_dict()
def update_channel(
self,
channel_id,
cdi_input_specification,
destinations,
encoder_settings,
input_attachments,
input_specification,
log_level,
name,
role_arn,
):
channel = self._channels[channel_id]
channel.cdi_input_specification = cdi_input_specification
channel.destinations = destinations
channel.encoder_settings = encoder_settings
channel.input_attachments = input_attachments
channel.input_specification = input_specification
channel.log_level = log_level
channel.name = name
channel.role_arn = role_arn
channel._resolve_transient_states()
channel._previous_state = channel.state
channel.state = "UPDATING"
return channel
def create_input(
self,
destinations,
input_devices,
input_security_groups,
media_connect_flows,
name,
request_id,
role_arn,
sources,
tags,
type,
vpc,
):
input_id = uuid4().hex
arn = "arn:aws:medialive:input:{}".format(input_id)
a_input = Input(
arn=arn,
input_id=input_id,
destinations=destinations,
input_devices=input_devices,
input_security_groups=input_security_groups,
media_connect_flows=media_connect_flows,
name=name,
role_arn=role_arn,
sources=sources,
tags=tags,
input_type=type,
state="CREATING",
)
self._inputs[input_id] = a_input
return a_input
def describe_input(self, input_id):
a_input = self._inputs[input_id]
a_input._resolve_transient_states()
return a_input.to_dict()
def list_inputs(self, max_results, next_token):
inputs = list(self._inputs.values())
if max_results is not None:
inputs = inputs[:max_results]
response_inputs = [i.to_dict() for i in inputs]
return response_inputs, next_token
def delete_input(self, input_id):
a_input = self._inputs[input_id]
a_input.state = "DELETING"
return a_input.to_dict()
def update_input(
self,
destinations,
input_devices,
input_id,
input_security_groups,
media_connect_flows,
name,
role_arn,
sources,
):
a_input = self._inputs[input_id]
a_input.destinations = destinations
a_input.input_devices = input_devices
a_input.security_groups = input_security_groups
a_input.media_connect_flows = media_connect_flows
a_input.name = name
a_input.role_arn = role_arn
a_input.sources = sources
return a_input
medialive_backends = {}
for region in Session().get_available_regions("medialive"):
medialive_backends[region] = MediaLiveBackend()
for region in Session().get_available_regions("medialive", partition_name="aws-us-gov"):
medialive_backends[region] = MediaLiveBackend()
for region in Session().get_available_regions("medialive", partition_name="aws-cn"):
medialive_backends[region] = MediaLiveBackend()
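# --- Hedged usage sketch (added; not part of the original module) ---
# Exercises the in-memory backend directly, bypassing the HTTP/responses layer.
# The region name and all argument values below are illustrative placeholders;
# "us-east-1" is assumed to be among the regions botocore reports for medialive.
if __name__ == "__main__":
    backend = medialive_backends["us-east-1"]
    channel = backend.create_channel(
        cdi_input_specification=None,
        channel_class="STANDARD",
        destinations=[],
        encoder_settings={},
        input_attachments=[],
        input_specification={},
        log_level="INFO",
        name="demo-channel",
        request_id="demo-request",
        reserved=None,
        role_arn="arn:aws:iam::123456789012:role/demo",
        tags={},
    )
    print(channel.to_dict()["state"])  # "CREATING" until the channel is described again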
| 34.0299
| 88
| 0.614957
|
8a985a4560b5b5ecb6ccb71743f3b214b1de20a6
| 32,397
|
py
|
Python
|
diff_dalle/gaussian_diffusion.py
|
afiaka87/Diff-DALLE
|
649d8a7093b67a23befc79ee4bbdbcc43d373b1a
|
[
"MIT"
] | 2
|
2021-07-19T17:31:10.000Z
|
2021-07-19T23:37:15.000Z
|
diff_dalle/gaussian_diffusion.py
|
afiaka87/Diff-DALLE
|
649d8a7093b67a23befc79ee4bbdbcc43d373b1a
|
[
"MIT"
] | null | null | null |
diff_dalle/gaussian_diffusion.py
|
afiaka87/Diff-DALLE
|
649d8a7093b67a23befc79ee4bbdbcc43d373b1a
|
[
"MIT"
] | null | null | null |
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat, clip_loss
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
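# Hedged example (added; not in the original file): both schedules return a 1-D
# float64 array of length num_diffusion_timesteps, e.g.
#
#     betas_linear = get_named_beta_schedule("linear", 1000)  # 1e-4 up to 2e-2
#     betas_cosine = get_named_beta_schedule("cosine", 1000)  # from the cosine alpha-bar
#     assert betas_linear.shape == betas_cosine.shape == (1000,)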
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
clip_coeff=0,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev)
* np.sqrt(alphas)
/ (1.0 - self.alphas_cumprod)
)
self.clip_coeff = clip_coeff
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
* noise
)
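    # Note (added): the return above is the closed-form forward process
    #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
    # i.e. q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I),
    # which is exactly what the two _extract_into_tensor terms compute.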
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, self._scale_timesteps(t), **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
        :param x: the current tensor x_t (the sample at the current timestep).
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next)
+ th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, clip=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
if self.clip_coeff != 0:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x_t.shape)
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
pred_xstart = self._predict_xstart_from_eps(x_t=x_t, t=t, eps=model_output)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x_t, t=t)
noise = th.randn_like(x_t)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x_t.shape) - 1)))
) # no noise when t == 0
images = model_mean + nonzero_mask * th.exp(0.5 * model_log_variance) * noise
terms["clip"] = self.clip_coeff * clip_loss(*clip(images, t, **model_kwargs))
terms["loss"] += terms["clip"]
else:
raise NotImplementedError(self.loss_type)
return terms
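    # Hedged usage note (added): a typical training step draws t uniformly and
    # averages the per-example losses, e.g.
    #
    #     t = th.randint(0, diffusion.num_timesteps, (x_start.shape[0],), device=x_start.device)
    #     losses = diffusion.training_losses(model, x_start, t)
    #     loss = losses["loss"].mean()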
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
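# --- Hedged end-to-end sketch (added; not part of the original module) ---
# Wires the pieces above together with a trivial stand-in "model" that predicts
# zero epsilon, just to show the expected shapes. Run as a module
# (python -m diff_dalle.gaussian_diffusion) so the relative imports resolve;
# a real run would pass a trained network instead of the dummy below.
if __name__ == "__main__":
    _betas = get_named_beta_schedule("cosine", 50)
    _diffusion = GaussianDiffusion(
        betas=_betas,
        model_mean_type=ModelMeanType.EPSILON,
        model_var_type=ModelVarType.FIXED_SMALL,
        loss_type=LossType.MSE,
    )
    def _dummy_model(x, t, **kwargs):
        return th.zeros_like(x)  # stand-in epsilon prediction
    _sample = _diffusion.p_sample_loop(_dummy_model, (2, 3, 8, 8), device="cpu")
    print(_sample.shape)  # torch.Size([2, 3, 8, 8])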
| 37.758741
| 129
| 0.584159
|
a2f936fe5d1ccef6cf2128005da864a15b5bea99
| 112,071
|
py
|
Python
|
dlp/google/cloud/dlp_v2/proto/storage_pb2.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2019-04-16T08:13:06.000Z
|
2019-04-16T08:13:06.000Z
|
dlp/google/cloud/dlp_v2/proto/storage_pb2.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | null | null | null |
dlp/google/cloud/dlp_v2/proto/storage_pb2.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2020-11-30T02:23:29.000Z
|
2020-11-30T02:23:29.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/privacy/dlp_v2/proto/storage.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/privacy/dlp_v2/proto/storage.proto",
package="google.privacy.dlp.v2",
syntax="proto3",
serialized_pb=_b(
'\n/google/cloud/privacy/dlp_v2/proto/storage.proto\x12\x15google.privacy.dlp.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x18\n\x08InfoType\x12\x0c\n\x04name\x18\x01 \x01(\t"K\n\nStoredType\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\x0b\x63reate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb1\x0b\n\x0e\x43ustomInfoType\x12\x32\n\tinfo_type\x18\x01 \x01(\x0b\x32\x1f.google.privacy.dlp.v2.InfoType\x12\x35\n\nlikelihood\x18\x06 \x01(\x0e\x32!.google.privacy.dlp.v2.Likelihood\x12\x46\n\ndictionary\x18\x02 \x01(\x0b\x32\x30.google.privacy.dlp.v2.CustomInfoType.DictionaryH\x00\x12<\n\x05regex\x18\x03 \x01(\x0b\x32+.google.privacy.dlp.v2.CustomInfoType.RegexH\x00\x12M\n\x0esurrogate_type\x18\x04 \x01(\x0b\x32\x33.google.privacy.dlp.v2.CustomInfoType.SurrogateTypeH\x00\x12\x38\n\x0bstored_type\x18\x05 \x01(\x0b\x32!.google.privacy.dlp.v2.StoredTypeH\x00\x12L\n\x0f\x64\x65tection_rules\x18\x07 \x03(\x0b\x32\x33.google.privacy.dlp.v2.CustomInfoType.DetectionRule\x12K\n\x0e\x65xclusion_type\x18\x08 \x01(\x0e\x32\x33.google.privacy.dlp.v2.CustomInfoType.ExclusionType\x1a\xc8\x01\n\nDictionary\x12N\n\tword_list\x18\x01 \x01(\x0b\x32\x39.google.privacy.dlp.v2.CustomInfoType.Dictionary.WordListH\x00\x12\x45\n\x12\x63loud_storage_path\x18\x03 \x01(\x0b\x32\'.google.privacy.dlp.v2.CloudStoragePathH\x00\x1a\x19\n\x08WordList\x12\r\n\x05words\x18\x01 \x03(\tB\x08\n\x06source\x1a\x18\n\x05Regex\x12\x0f\n\x07pattern\x18\x01 \x01(\t\x1a\x0f\n\rSurrogateType\x1a\xbe\x04\n\rDetectionRule\x12W\n\x0chotword_rule\x18\x01 \x01(\x0b\x32?.google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRuleH\x00\x1a\x38\n\tProximity\x12\x15\n\rwindow_before\x18\x01 \x01(\x05\x12\x14\n\x0cwindow_after\x18\x02 \x01(\x05\x1a\x82\x01\n\x14LikelihoodAdjustment\x12=\n\x10\x66ixed_likelihood\x18\x01 \x01(\x0e\x32!.google.privacy.dlp.v2.LikelihoodH\x00\x12\x1d\n\x13relative_likelihood\x18\x02 \x01(\x05H\x00\x42\x0c\n\nadjustment\x1a\x8c\x02\n\x0bHotwordRule\x12\x42\n\rhotword_regex\x18\x01 \x01(\x0b\x32+.google.privacy.dlp.v2.CustomInfoType.Regex\x12P\n\tproximity\x18\x02 \x01(\x0b\x32=.google.privacy.dlp.v2.CustomInfoType.DetectionRule.Proximity\x12g\n\x15likelihood_adjustment\x18\x03 \x01(\x0b\x32H.google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustmentB\x06\n\x04type"K\n\rExclusionType\x12\x1e\n\x1a\x45XCLUSION_TYPE_UNSPECIFIED\x10\x00\x12\x1a\n\x16\x45XCLUSION_TYPE_EXCLUDE\x10\x01\x42\x06\n\x04type"\x17\n\x07\x46ieldId\x12\x0c\n\x04name\x18\x01 \x01(\t"7\n\x0bPartitionId\x12\x12\n\nproject_id\x18\x02 \x01(\t\x12\x14\n\x0cnamespace_id\x18\x04 \x01(\t"\x1e\n\x0eKindExpression\x12\x0c\n\x04name\x18\x01 \x01(\t"\x81\x01\n\x10\x44\x61tastoreOptions\x12\x38\n\x0cpartition_id\x18\x01 \x01(\x0b\x32".google.privacy.dlp.v2.PartitionId\x12\x33\n\x04kind\x18\x02 \x01(\x0b\x32%.google.privacy.dlp.v2.KindExpression"]\n\x18\x43loudStorageRegexFileSet\x12\x13\n\x0b\x62ucket_name\x18\x01 \x01(\t\x12\x15\n\rinclude_regex\x18\x02 \x03(\t\x12\x15\n\rexclude_regex\x18\x03 \x03(\t"\xec\x03\n\x13\x43loudStorageOptions\x12\x44\n\x08\x66ile_set\x18\x01 \x01(\x0b\x32\x32.google.privacy.dlp.v2.CloudStorageOptions.FileSet\x12\x1c\n\x14\x62ytes_limit_per_file\x18\x04 \x01(\x03\x12$\n\x1c\x62ytes_limit_per_file_percent\x18\x08 \x01(\x05\x12\x33\n\nfile_types\x18\x05 \x03(\x0e\x32\x1f.google.privacy.dlp.v2.FileType\x12N\n\rsample_method\x18\x06 \x01(\x0e\x32\x37.google.privacy.dlp.v2.CloudStorageOptions.SampleMethod\x12\x1b\n\x13\x66iles_limit_percent\x18\x07 
\x01(\x05\x1a_\n\x07\x46ileSet\x12\x0b\n\x03url\x18\x01 \x01(\t\x12G\n\x0eregex_file_set\x18\x02 \x01(\x0b\x32/.google.privacy.dlp.v2.CloudStorageRegexFileSet"H\n\x0cSampleMethod\x12\x1d\n\x19SAMPLE_METHOD_UNSPECIFIED\x10\x00\x12\x07\n\x03TOP\x10\x01\x12\x10\n\x0cRANDOM_START\x10\x02""\n\x13\x43loudStorageFileSet\x12\x0b\n\x03url\x18\x01 \x01(\t" \n\x10\x43loudStoragePath\x12\x0c\n\x04path\x18\x01 \x01(\t"\x8b\x03\n\x0f\x42igQueryOptions\x12=\n\x0ftable_reference\x18\x01 \x01(\x0b\x32$.google.privacy.dlp.v2.BigQueryTable\x12:\n\x12identifying_fields\x18\x02 \x03(\x0b\x32\x1e.google.privacy.dlp.v2.FieldId\x12\x12\n\nrows_limit\x18\x03 \x01(\x03\x12\x1a\n\x12rows_limit_percent\x18\x06 \x01(\x05\x12J\n\rsample_method\x18\x04 \x01(\x0e\x32\x33.google.privacy.dlp.v2.BigQueryOptions.SampleMethod\x12\x37\n\x0f\x65xcluded_fields\x18\x05 \x03(\x0b\x32\x1e.google.privacy.dlp.v2.FieldId"H\n\x0cSampleMethod\x12\x1d\n\x19SAMPLE_METHOD_UNSPECIFIED\x10\x00\x12\x07\n\x03TOP\x10\x01\x12\x10\n\x0cRANDOM_START\x10\x02"\x9a\x04\n\rStorageConfig\x12\x44\n\x11\x64\x61tastore_options\x18\x02 \x01(\x0b\x32\'.google.privacy.dlp.v2.DatastoreOptionsH\x00\x12K\n\x15\x63loud_storage_options\x18\x03 \x01(\x0b\x32*.google.privacy.dlp.v2.CloudStorageOptionsH\x00\x12\x43\n\x11\x62ig_query_options\x18\x04 \x01(\x0b\x32&.google.privacy.dlp.v2.BigQueryOptionsH\x00\x12L\n\x0ftimespan_config\x18\x06 \x01(\x0b\x32\x33.google.privacy.dlp.v2.StorageConfig.TimespanConfig\x1a\xda\x01\n\x0eTimespanConfig\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x0ftimestamp_field\x18\x03 \x01(\x0b\x32\x1e.google.privacy.dlp.v2.FieldId\x12\x31\n)enable_auto_population_of_timespan_config\x18\x04 \x01(\x08\x42\x06\n\x04type"`\n\x0b\x42igQueryKey\x12=\n\x0ftable_reference\x18\x01 \x01(\x0b\x32$.google.privacy.dlp.v2.BigQueryTable\x12\x12\n\nrow_number\x18\x02 \x01(\x03">\n\x0c\x44\x61tastoreKey\x12.\n\nentity_key\x18\x01 \x01(\x0b\x32\x1a.google.privacy.dlp.v2.Key"\xbb\x01\n\x03Key\x12\x38\n\x0cpartition_id\x18\x01 \x01(\x0b\x32".google.privacy.dlp.v2.PartitionId\x12\x34\n\x04path\x18\x02 \x03(\x0b\x32&.google.privacy.dlp.v2.Key.PathElement\x1a\x44\n\x0bPathElement\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x02id\x18\x02 \x01(\x03H\x00\x12\x0e\n\x04name\x18\x03 \x01(\tH\x00\x42\t\n\x07id_type"\x8e\x01\n\tRecordKey\x12<\n\rdatastore_key\x18\x02 \x01(\x0b\x32#.google.privacy.dlp.v2.DatastoreKeyH\x00\x12;\n\rbig_query_key\x18\x03 \x01(\x0b\x32".google.privacy.dlp.v2.BigQueryKeyH\x00\x42\x06\n\x04type"I\n\rBigQueryTable\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x10\n\x08table_id\x18\x03 \x01(\t"s\n\rBigQueryField\x12\x33\n\x05table\x18\x01 \x01(\x0b\x32$.google.privacy.dlp.v2.BigQueryTable\x12-\n\x05\x66ield\x18\x02 \x01(\x0b\x32\x1e.google.privacy.dlp.v2.FieldId"9\n\x08\x45ntityId\x12-\n\x05\x66ield\x18\x01 \x01(\x0b\x32\x1e.google.privacy.dlp.v2.FieldId*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*E\n\x08\x46ileType\x12\x19\n\x15\x46ILE_TYPE_UNSPECIFIED\x10\x00\x12\x0f\n\x0b\x42INARY_FILE\x10\x01\x12\r\n\tTEXT_FILE\x10\x02\x42\x8f\x01\n\x19\x63om.google.privacy.dlp.v2B\nDlpStorageP\x01Z8google.golang.org/genproto/googleapis/privacy/dlp/v2;dlp\xaa\x02\x13Google.Cloud.Dlp.V2\xca\x02\x13Google\\Cloud\\Dlp\\V2b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_LIKELIHOOD = _descriptor.EnumDescriptor(
name="Likelihood",
full_name="google.privacy.dlp.v2.Likelihood",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="LIKELIHOOD_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="VERY_UNLIKELY", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNLIKELY", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="POSSIBLE", index=3, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="LIKELY", index=4, number=4, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="VERY_LIKELY", index=5, number=5, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=4293,
serialized_end=4409,
)
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD)
Likelihood = enum_type_wrapper.EnumTypeWrapper(_LIKELIHOOD)
_FILETYPE = _descriptor.EnumDescriptor(
name="FileType",
full_name="google.privacy.dlp.v2.FileType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="FILE_TYPE_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="BINARY_FILE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TEXT_FILE", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=4411,
serialized_end=4480,
)
_sym_db.RegisterEnumDescriptor(_FILETYPE)
FileType = enum_type_wrapper.EnumTypeWrapper(_FILETYPE)
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
FILE_TYPE_UNSPECIFIED = 0
BINARY_FILE = 1
TEXT_FILE = 2
_CUSTOMINFOTYPE_EXCLUSIONTYPE = _descriptor.EnumDescriptor(
name="ExclusionType",
full_name="google.privacy.dlp.v2.CustomInfoType.ExclusionType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="EXCLUSION_TYPE_UNSPECIFIED",
index=0,
number=0,
options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="EXCLUSION_TYPE_EXCLUDE", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=1615,
serialized_end=1690,
)
_sym_db.RegisterEnumDescriptor(_CUSTOMINFOTYPE_EXCLUSIONTYPE)
_CLOUDSTORAGEOPTIONS_SAMPLEMETHOD = _descriptor.EnumDescriptor(
name="SampleMethod",
full_name="google.privacy.dlp.v2.CloudStorageOptions.SampleMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SAMPLE_METHOD_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TOP", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RANDOM_START", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2462,
serialized_end=2534,
)
_sym_db.RegisterEnumDescriptor(_CLOUDSTORAGEOPTIONS_SAMPLEMETHOD)
_BIGQUERYOPTIONS_SAMPLEMETHOD = _descriptor.EnumDescriptor(
name="SampleMethod",
full_name="google.privacy.dlp.v2.BigQueryOptions.SampleMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SAMPLE_METHOD_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TOP", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RANDOM_START", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2462,
serialized_end=2534,
)
_sym_db.RegisterEnumDescriptor(_BIGQUERYOPTIONS_SAMPLEMETHOD)
_INFOTYPE = _descriptor.Descriptor(
name="InfoType",
full_name="google.privacy.dlp.v2.InfoType",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.privacy.dlp.v2.InfoType.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=137,
serialized_end=161,
)
_STOREDTYPE = _descriptor.Descriptor(
name="StoredType",
full_name="google.privacy.dlp.v2.StoredType",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.privacy.dlp.v2.StoredType.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="create_time",
full_name="google.privacy.dlp.v2.StoredType.create_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=163,
serialized_end=238,
)
_CUSTOMINFOTYPE_DICTIONARY_WORDLIST = _descriptor.Descriptor(
name="WordList",
full_name="google.privacy.dlp.v2.CustomInfoType.Dictionary.WordList",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="words",
full_name="google.privacy.dlp.v2.CustomInfoType.Dictionary.WordList.words",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=958,
serialized_end=983,
)
_CUSTOMINFOTYPE_DICTIONARY = _descriptor.Descriptor(
name="Dictionary",
full_name="google.privacy.dlp.v2.CustomInfoType.Dictionary",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="word_list",
full_name="google.privacy.dlp.v2.CustomInfoType.Dictionary.word_list",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cloud_storage_path",
full_name="google.privacy.dlp.v2.CustomInfoType.Dictionary.cloud_storage_path",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_CUSTOMINFOTYPE_DICTIONARY_WORDLIST],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="source",
full_name="google.privacy.dlp.v2.CustomInfoType.Dictionary.source",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=793,
serialized_end=993,
)
_CUSTOMINFOTYPE_REGEX = _descriptor.Descriptor(
name="Regex",
full_name="google.privacy.dlp.v2.CustomInfoType.Regex",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="pattern",
full_name="google.privacy.dlp.v2.CustomInfoType.Regex.pattern",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=995,
serialized_end=1019,
)
_CUSTOMINFOTYPE_SURROGATETYPE = _descriptor.Descriptor(
name="SurrogateType",
full_name="google.privacy.dlp.v2.CustomInfoType.SurrogateType",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1021,
serialized_end=1036,
)
_CUSTOMINFOTYPE_DETECTIONRULE_PROXIMITY = _descriptor.Descriptor(
name="Proximity",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.Proximity",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="window_before",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.Proximity.window_before",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="window_after",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.Proximity.window_after",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1145,
serialized_end=1201,
)
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT = _descriptor.Descriptor(
name="LikelihoodAdjustment",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustment",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="fixed_likelihood",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustment.fixed_likelihood",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="relative_likelihood",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustment.relative_likelihood",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="adjustment",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustment.adjustment",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=1204,
serialized_end=1334,
)
_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE = _descriptor.Descriptor(
name="HotwordRule",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="hotword_regex",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRule.hotword_regex",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="proximity",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRule.proximity",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="likelihood_adjustment",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRule.likelihood_adjustment",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1337,
serialized_end=1605,
)
_CUSTOMINFOTYPE_DETECTIONRULE = _descriptor.Descriptor(
name="DetectionRule",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="hotword_rule",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.hotword_rule",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[
_CUSTOMINFOTYPE_DETECTIONRULE_PROXIMITY,
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT,
_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE,
],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="type",
full_name="google.privacy.dlp.v2.CustomInfoType.DetectionRule.type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=1039,
serialized_end=1613,
)
_CUSTOMINFOTYPE = _descriptor.Descriptor(
name="CustomInfoType",
full_name="google.privacy.dlp.v2.CustomInfoType",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="info_type",
full_name="google.privacy.dlp.v2.CustomInfoType.info_type",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="likelihood",
full_name="google.privacy.dlp.v2.CustomInfoType.likelihood",
index=1,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dictionary",
full_name="google.privacy.dlp.v2.CustomInfoType.dictionary",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="regex",
full_name="google.privacy.dlp.v2.CustomInfoType.regex",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="surrogate_type",
full_name="google.privacy.dlp.v2.CustomInfoType.surrogate_type",
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stored_type",
full_name="google.privacy.dlp.v2.CustomInfoType.stored_type",
index=5,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="detection_rules",
full_name="google.privacy.dlp.v2.CustomInfoType.detection_rules",
index=6,
number=7,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="exclusion_type",
full_name="google.privacy.dlp.v2.CustomInfoType.exclusion_type",
index=7,
number=8,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_CUSTOMINFOTYPE_DICTIONARY,
_CUSTOMINFOTYPE_REGEX,
_CUSTOMINFOTYPE_SURROGATETYPE,
_CUSTOMINFOTYPE_DETECTIONRULE,
],
enum_types=[_CUSTOMINFOTYPE_EXCLUSIONTYPE],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="type",
full_name="google.privacy.dlp.v2.CustomInfoType.type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=241,
serialized_end=1698,
)
_FIELDID = _descriptor.Descriptor(
name="FieldId",
full_name="google.privacy.dlp.v2.FieldId",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.privacy.dlp.v2.FieldId.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1700,
serialized_end=1723,
)
_PARTITIONID = _descriptor.Descriptor(
name="PartitionId",
full_name="google.privacy.dlp.v2.PartitionId",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.privacy.dlp.v2.PartitionId.project_id",
index=0,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="namespace_id",
full_name="google.privacy.dlp.v2.PartitionId.namespace_id",
index=1,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1725,
serialized_end=1780,
)
_KINDEXPRESSION = _descriptor.Descriptor(
name="KindExpression",
full_name="google.privacy.dlp.v2.KindExpression",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.privacy.dlp.v2.KindExpression.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1782,
serialized_end=1812,
)
_DATASTOREOPTIONS = _descriptor.Descriptor(
name="DatastoreOptions",
full_name="google.privacy.dlp.v2.DatastoreOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partition_id",
full_name="google.privacy.dlp.v2.DatastoreOptions.partition_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="kind",
full_name="google.privacy.dlp.v2.DatastoreOptions.kind",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1815,
serialized_end=1944,
)
_CLOUDSTORAGEREGEXFILESET = _descriptor.Descriptor(
name="CloudStorageRegexFileSet",
full_name="google.privacy.dlp.v2.CloudStorageRegexFileSet",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="bucket_name",
full_name="google.privacy.dlp.v2.CloudStorageRegexFileSet.bucket_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="include_regex",
full_name="google.privacy.dlp.v2.CloudStorageRegexFileSet.include_regex",
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="exclude_regex",
full_name="google.privacy.dlp.v2.CloudStorageRegexFileSet.exclude_regex",
index=2,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1946,
serialized_end=2039,
)
_CLOUDSTORAGEOPTIONS_FILESET = _descriptor.Descriptor(
name="FileSet",
full_name="google.privacy.dlp.v2.CloudStorageOptions.FileSet",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="url",
full_name="google.privacy.dlp.v2.CloudStorageOptions.FileSet.url",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="regex_file_set",
full_name="google.privacy.dlp.v2.CloudStorageOptions.FileSet.regex_file_set",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2365,
serialized_end=2460,
)
_CLOUDSTORAGEOPTIONS = _descriptor.Descriptor(
name="CloudStorageOptions",
full_name="google.privacy.dlp.v2.CloudStorageOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="file_set",
full_name="google.privacy.dlp.v2.CloudStorageOptions.file_set",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="bytes_limit_per_file",
full_name="google.privacy.dlp.v2.CloudStorageOptions.bytes_limit_per_file",
index=1,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="bytes_limit_per_file_percent",
full_name="google.privacy.dlp.v2.CloudStorageOptions.bytes_limit_per_file_percent",
index=2,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="file_types",
full_name="google.privacy.dlp.v2.CloudStorageOptions.file_types",
index=3,
number=5,
type=14,
cpp_type=8,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sample_method",
full_name="google.privacy.dlp.v2.CloudStorageOptions.sample_method",
index=4,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="files_limit_percent",
full_name="google.privacy.dlp.v2.CloudStorageOptions.files_limit_percent",
index=5,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_CLOUDSTORAGEOPTIONS_FILESET],
enum_types=[_CLOUDSTORAGEOPTIONS_SAMPLEMETHOD],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2042,
serialized_end=2534,
)
_CLOUDSTORAGEFILESET = _descriptor.Descriptor(
name="CloudStorageFileSet",
full_name="google.privacy.dlp.v2.CloudStorageFileSet",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="url",
full_name="google.privacy.dlp.v2.CloudStorageFileSet.url",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2536,
serialized_end=2570,
)
_CLOUDSTORAGEPATH = _descriptor.Descriptor(
name="CloudStoragePath",
full_name="google.privacy.dlp.v2.CloudStoragePath",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="path",
full_name="google.privacy.dlp.v2.CloudStoragePath.path",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2572,
serialized_end=2604,
)
_BIGQUERYOPTIONS = _descriptor.Descriptor(
name="BigQueryOptions",
full_name="google.privacy.dlp.v2.BigQueryOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_reference",
full_name="google.privacy.dlp.v2.BigQueryOptions.table_reference",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="identifying_fields",
full_name="google.privacy.dlp.v2.BigQueryOptions.identifying_fields",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rows_limit",
full_name="google.privacy.dlp.v2.BigQueryOptions.rows_limit",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rows_limit_percent",
full_name="google.privacy.dlp.v2.BigQueryOptions.rows_limit_percent",
index=3,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sample_method",
full_name="google.privacy.dlp.v2.BigQueryOptions.sample_method",
index=4,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="excluded_fields",
full_name="google.privacy.dlp.v2.BigQueryOptions.excluded_fields",
index=5,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_BIGQUERYOPTIONS_SAMPLEMETHOD],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2607,
serialized_end=3002,
)
_STORAGECONFIG_TIMESPANCONFIG = _descriptor.Descriptor(
name="TimespanConfig",
full_name="google.privacy.dlp.v2.StorageConfig.TimespanConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.privacy.dlp.v2.StorageConfig.TimespanConfig.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.privacy.dlp.v2.StorageConfig.TimespanConfig.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timestamp_field",
full_name="google.privacy.dlp.v2.StorageConfig.TimespanConfig.timestamp_field",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="enable_auto_population_of_timespan_config",
full_name="google.privacy.dlp.v2.StorageConfig.TimespanConfig.enable_auto_population_of_timespan_config",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3317,
serialized_end=3535,
)
_STORAGECONFIG = _descriptor.Descriptor(
name="StorageConfig",
full_name="google.privacy.dlp.v2.StorageConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="datastore_options",
full_name="google.privacy.dlp.v2.StorageConfig.datastore_options",
index=0,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cloud_storage_options",
full_name="google.privacy.dlp.v2.StorageConfig.cloud_storage_options",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="big_query_options",
full_name="google.privacy.dlp.v2.StorageConfig.big_query_options",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timespan_config",
full_name="google.privacy.dlp.v2.StorageConfig.timespan_config",
index=3,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_STORAGECONFIG_TIMESPANCONFIG],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="type",
full_name="google.privacy.dlp.v2.StorageConfig.type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=3005,
serialized_end=3543,
)
_BIGQUERYKEY = _descriptor.Descriptor(
name="BigQueryKey",
full_name="google.privacy.dlp.v2.BigQueryKey",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_reference",
full_name="google.privacy.dlp.v2.BigQueryKey.table_reference",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_number",
full_name="google.privacy.dlp.v2.BigQueryKey.row_number",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3545,
serialized_end=3641,
)
_DATASTOREKEY = _descriptor.Descriptor(
name="DatastoreKey",
full_name="google.privacy.dlp.v2.DatastoreKey",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="entity_key",
full_name="google.privacy.dlp.v2.DatastoreKey.entity_key",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3643,
serialized_end=3705,
)
_KEY_PATHELEMENT = _descriptor.Descriptor(
name="PathElement",
full_name="google.privacy.dlp.v2.Key.PathElement",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="kind",
full_name="google.privacy.dlp.v2.Key.PathElement.kind",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="id",
full_name="google.privacy.dlp.v2.Key.PathElement.id",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="name",
full_name="google.privacy.dlp.v2.Key.PathElement.name",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="id_type",
full_name="google.privacy.dlp.v2.Key.PathElement.id_type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=3827,
serialized_end=3895,
)
_KEY = _descriptor.Descriptor(
name="Key",
full_name="google.privacy.dlp.v2.Key",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partition_id",
full_name="google.privacy.dlp.v2.Key.partition_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="path",
full_name="google.privacy.dlp.v2.Key.path",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_KEY_PATHELEMENT],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3708,
serialized_end=3895,
)
_RECORDKEY = _descriptor.Descriptor(
name="RecordKey",
full_name="google.privacy.dlp.v2.RecordKey",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="datastore_key",
full_name="google.privacy.dlp.v2.RecordKey.datastore_key",
index=0,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="big_query_key",
full_name="google.privacy.dlp.v2.RecordKey.big_query_key",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="type",
full_name="google.privacy.dlp.v2.RecordKey.type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=3898,
serialized_end=4040,
)
_BIGQUERYTABLE = _descriptor.Descriptor(
name="BigQueryTable",
full_name="google.privacy.dlp.v2.BigQueryTable",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.privacy.dlp.v2.BigQueryTable.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset_id",
full_name="google.privacy.dlp.v2.BigQueryTable.dataset_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="table_id",
full_name="google.privacy.dlp.v2.BigQueryTable.table_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4042,
serialized_end=4115,
)
_BIGQUERYFIELD = _descriptor.Descriptor(
name="BigQueryField",
full_name="google.privacy.dlp.v2.BigQueryField",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table",
full_name="google.privacy.dlp.v2.BigQueryField.table",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field",
full_name="google.privacy.dlp.v2.BigQueryField.field",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4117,
serialized_end=4232,
)
_ENTITYID = _descriptor.Descriptor(
name="EntityId",
full_name="google.privacy.dlp.v2.EntityId",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="field",
full_name="google.privacy.dlp.v2.EntityId.field",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4234,
serialized_end=4291,
)
_STOREDTYPE.fields_by_name[
"create_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CUSTOMINFOTYPE_DICTIONARY_WORDLIST.containing_type = _CUSTOMINFOTYPE_DICTIONARY
_CUSTOMINFOTYPE_DICTIONARY.fields_by_name[
"word_list"
].message_type = _CUSTOMINFOTYPE_DICTIONARY_WORDLIST
_CUSTOMINFOTYPE_DICTIONARY.fields_by_name[
"cloud_storage_path"
].message_type = _CLOUDSTORAGEPATH
_CUSTOMINFOTYPE_DICTIONARY.containing_type = _CUSTOMINFOTYPE
_CUSTOMINFOTYPE_DICTIONARY.oneofs_by_name["source"].fields.append(
_CUSTOMINFOTYPE_DICTIONARY.fields_by_name["word_list"]
)
_CUSTOMINFOTYPE_DICTIONARY.fields_by_name[
"word_list"
].containing_oneof = _CUSTOMINFOTYPE_DICTIONARY.oneofs_by_name["source"]
_CUSTOMINFOTYPE_DICTIONARY.oneofs_by_name["source"].fields.append(
_CUSTOMINFOTYPE_DICTIONARY.fields_by_name["cloud_storage_path"]
)
_CUSTOMINFOTYPE_DICTIONARY.fields_by_name[
"cloud_storage_path"
].containing_oneof = _CUSTOMINFOTYPE_DICTIONARY.oneofs_by_name["source"]
_CUSTOMINFOTYPE_REGEX.containing_type = _CUSTOMINFOTYPE
_CUSTOMINFOTYPE_SURROGATETYPE.containing_type = _CUSTOMINFOTYPE
_CUSTOMINFOTYPE_DETECTIONRULE_PROXIMITY.containing_type = _CUSTOMINFOTYPE_DETECTIONRULE
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.fields_by_name[
"fixed_likelihood"
].enum_type = _LIKELIHOOD
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.containing_type = (
_CUSTOMINFOTYPE_DETECTIONRULE
)
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.oneofs_by_name[
"adjustment"
].fields.append(
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.fields_by_name[
"fixed_likelihood"
]
)
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.fields_by_name[
"fixed_likelihood"
].containing_oneof = _CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.oneofs_by_name[
"adjustment"
]
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.oneofs_by_name[
"adjustment"
].fields.append(
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.fields_by_name[
"relative_likelihood"
]
)
_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.fields_by_name[
"relative_likelihood"
].containing_oneof = _CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT.oneofs_by_name[
"adjustment"
]
_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE.fields_by_name[
"hotword_regex"
].message_type = _CUSTOMINFOTYPE_REGEX
_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE.fields_by_name[
"proximity"
].message_type = _CUSTOMINFOTYPE_DETECTIONRULE_PROXIMITY
_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE.fields_by_name[
"likelihood_adjustment"
].message_type = _CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT
_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE.containing_type = (
_CUSTOMINFOTYPE_DETECTIONRULE
)
_CUSTOMINFOTYPE_DETECTIONRULE.fields_by_name[
"hotword_rule"
].message_type = _CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE
_CUSTOMINFOTYPE_DETECTIONRULE.containing_type = _CUSTOMINFOTYPE
_CUSTOMINFOTYPE_DETECTIONRULE.oneofs_by_name["type"].fields.append(
_CUSTOMINFOTYPE_DETECTIONRULE.fields_by_name["hotword_rule"]
)
_CUSTOMINFOTYPE_DETECTIONRULE.fields_by_name[
"hotword_rule"
].containing_oneof = _CUSTOMINFOTYPE_DETECTIONRULE.oneofs_by_name["type"]
_CUSTOMINFOTYPE.fields_by_name["info_type"].message_type = _INFOTYPE
_CUSTOMINFOTYPE.fields_by_name["likelihood"].enum_type = _LIKELIHOOD
_CUSTOMINFOTYPE.fields_by_name["dictionary"].message_type = _CUSTOMINFOTYPE_DICTIONARY
_CUSTOMINFOTYPE.fields_by_name["regex"].message_type = _CUSTOMINFOTYPE_REGEX
_CUSTOMINFOTYPE.fields_by_name[
"surrogate_type"
].message_type = _CUSTOMINFOTYPE_SURROGATETYPE
_CUSTOMINFOTYPE.fields_by_name["stored_type"].message_type = _STOREDTYPE
_CUSTOMINFOTYPE.fields_by_name[
"detection_rules"
].message_type = _CUSTOMINFOTYPE_DETECTIONRULE
_CUSTOMINFOTYPE.fields_by_name[
"exclusion_type"
].enum_type = _CUSTOMINFOTYPE_EXCLUSIONTYPE
_CUSTOMINFOTYPE_EXCLUSIONTYPE.containing_type = _CUSTOMINFOTYPE
_CUSTOMINFOTYPE.oneofs_by_name["type"].fields.append(
_CUSTOMINFOTYPE.fields_by_name["dictionary"]
)
_CUSTOMINFOTYPE.fields_by_name[
"dictionary"
].containing_oneof = _CUSTOMINFOTYPE.oneofs_by_name["type"]
_CUSTOMINFOTYPE.oneofs_by_name["type"].fields.append(
_CUSTOMINFOTYPE.fields_by_name["regex"]
)
_CUSTOMINFOTYPE.fields_by_name[
"regex"
].containing_oneof = _CUSTOMINFOTYPE.oneofs_by_name["type"]
_CUSTOMINFOTYPE.oneofs_by_name["type"].fields.append(
_CUSTOMINFOTYPE.fields_by_name["surrogate_type"]
)
_CUSTOMINFOTYPE.fields_by_name[
"surrogate_type"
].containing_oneof = _CUSTOMINFOTYPE.oneofs_by_name["type"]
_CUSTOMINFOTYPE.oneofs_by_name["type"].fields.append(
_CUSTOMINFOTYPE.fields_by_name["stored_type"]
)
_CUSTOMINFOTYPE.fields_by_name[
"stored_type"
].containing_oneof = _CUSTOMINFOTYPE.oneofs_by_name["type"]
_DATASTOREOPTIONS.fields_by_name["partition_id"].message_type = _PARTITIONID
_DATASTOREOPTIONS.fields_by_name["kind"].message_type = _KINDEXPRESSION
_CLOUDSTORAGEOPTIONS_FILESET.fields_by_name[
"regex_file_set"
].message_type = _CLOUDSTORAGEREGEXFILESET
_CLOUDSTORAGEOPTIONS_FILESET.containing_type = _CLOUDSTORAGEOPTIONS
_CLOUDSTORAGEOPTIONS.fields_by_name[
"file_set"
].message_type = _CLOUDSTORAGEOPTIONS_FILESET
_CLOUDSTORAGEOPTIONS.fields_by_name["file_types"].enum_type = _FILETYPE
_CLOUDSTORAGEOPTIONS.fields_by_name[
"sample_method"
].enum_type = _CLOUDSTORAGEOPTIONS_SAMPLEMETHOD
_CLOUDSTORAGEOPTIONS_SAMPLEMETHOD.containing_type = _CLOUDSTORAGEOPTIONS
_BIGQUERYOPTIONS.fields_by_name["table_reference"].message_type = _BIGQUERYTABLE
_BIGQUERYOPTIONS.fields_by_name["identifying_fields"].message_type = _FIELDID
_BIGQUERYOPTIONS.fields_by_name[
"sample_method"
].enum_type = _BIGQUERYOPTIONS_SAMPLEMETHOD
_BIGQUERYOPTIONS.fields_by_name["excluded_fields"].message_type = _FIELDID
_BIGQUERYOPTIONS_SAMPLEMETHOD.containing_type = _BIGQUERYOPTIONS
_STORAGECONFIG_TIMESPANCONFIG.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STORAGECONFIG_TIMESPANCONFIG.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STORAGECONFIG_TIMESPANCONFIG.fields_by_name["timestamp_field"].message_type = _FIELDID
_STORAGECONFIG_TIMESPANCONFIG.containing_type = _STORAGECONFIG
_STORAGECONFIG.fields_by_name["datastore_options"].message_type = _DATASTOREOPTIONS
_STORAGECONFIG.fields_by_name[
"cloud_storage_options"
].message_type = _CLOUDSTORAGEOPTIONS
_STORAGECONFIG.fields_by_name["big_query_options"].message_type = _BIGQUERYOPTIONS
_STORAGECONFIG.fields_by_name[
"timespan_config"
].message_type = _STORAGECONFIG_TIMESPANCONFIG
_STORAGECONFIG.oneofs_by_name["type"].fields.append(
_STORAGECONFIG.fields_by_name["datastore_options"]
)
_STORAGECONFIG.fields_by_name[
"datastore_options"
].containing_oneof = _STORAGECONFIG.oneofs_by_name["type"]
_STORAGECONFIG.oneofs_by_name["type"].fields.append(
_STORAGECONFIG.fields_by_name["cloud_storage_options"]
)
_STORAGECONFIG.fields_by_name[
"cloud_storage_options"
].containing_oneof = _STORAGECONFIG.oneofs_by_name["type"]
_STORAGECONFIG.oneofs_by_name["type"].fields.append(
_STORAGECONFIG.fields_by_name["big_query_options"]
)
_STORAGECONFIG.fields_by_name[
"big_query_options"
].containing_oneof = _STORAGECONFIG.oneofs_by_name["type"]
_BIGQUERYKEY.fields_by_name["table_reference"].message_type = _BIGQUERYTABLE
_DATASTOREKEY.fields_by_name["entity_key"].message_type = _KEY
_KEY_PATHELEMENT.containing_type = _KEY
_KEY_PATHELEMENT.oneofs_by_name["id_type"].fields.append(
_KEY_PATHELEMENT.fields_by_name["id"]
)
_KEY_PATHELEMENT.fields_by_name[
"id"
].containing_oneof = _KEY_PATHELEMENT.oneofs_by_name["id_type"]
_KEY_PATHELEMENT.oneofs_by_name["id_type"].fields.append(
_KEY_PATHELEMENT.fields_by_name["name"]
)
_KEY_PATHELEMENT.fields_by_name[
"name"
].containing_oneof = _KEY_PATHELEMENT.oneofs_by_name["id_type"]
_KEY.fields_by_name["partition_id"].message_type = _PARTITIONID
_KEY.fields_by_name["path"].message_type = _KEY_PATHELEMENT
_RECORDKEY.fields_by_name["datastore_key"].message_type = _DATASTOREKEY
_RECORDKEY.fields_by_name["big_query_key"].message_type = _BIGQUERYKEY
_RECORDKEY.oneofs_by_name["type"].fields.append(
_RECORDKEY.fields_by_name["datastore_key"]
)
_RECORDKEY.fields_by_name["datastore_key"].containing_oneof = _RECORDKEY.oneofs_by_name[
"type"
]
_RECORDKEY.oneofs_by_name["type"].fields.append(
_RECORDKEY.fields_by_name["big_query_key"]
)
_RECORDKEY.fields_by_name["big_query_key"].containing_oneof = _RECORDKEY.oneofs_by_name[
"type"
]
_BIGQUERYFIELD.fields_by_name["table"].message_type = _BIGQUERYTABLE
_BIGQUERYFIELD.fields_by_name["field"].message_type = _FIELDID
_ENTITYID.fields_by_name["field"].message_type = _FIELDID
DESCRIPTOR.message_types_by_name["InfoType"] = _INFOTYPE
DESCRIPTOR.message_types_by_name["StoredType"] = _STOREDTYPE
DESCRIPTOR.message_types_by_name["CustomInfoType"] = _CUSTOMINFOTYPE
DESCRIPTOR.message_types_by_name["FieldId"] = _FIELDID
DESCRIPTOR.message_types_by_name["PartitionId"] = _PARTITIONID
DESCRIPTOR.message_types_by_name["KindExpression"] = _KINDEXPRESSION
DESCRIPTOR.message_types_by_name["DatastoreOptions"] = _DATASTOREOPTIONS
DESCRIPTOR.message_types_by_name["CloudStorageRegexFileSet"] = _CLOUDSTORAGEREGEXFILESET
DESCRIPTOR.message_types_by_name["CloudStorageOptions"] = _CLOUDSTORAGEOPTIONS
DESCRIPTOR.message_types_by_name["CloudStorageFileSet"] = _CLOUDSTORAGEFILESET
DESCRIPTOR.message_types_by_name["CloudStoragePath"] = _CLOUDSTORAGEPATH
DESCRIPTOR.message_types_by_name["BigQueryOptions"] = _BIGQUERYOPTIONS
DESCRIPTOR.message_types_by_name["StorageConfig"] = _STORAGECONFIG
DESCRIPTOR.message_types_by_name["BigQueryKey"] = _BIGQUERYKEY
DESCRIPTOR.message_types_by_name["DatastoreKey"] = _DATASTOREKEY
DESCRIPTOR.message_types_by_name["Key"] = _KEY
DESCRIPTOR.message_types_by_name["RecordKey"] = _RECORDKEY
DESCRIPTOR.message_types_by_name["BigQueryTable"] = _BIGQUERYTABLE
DESCRIPTOR.message_types_by_name["BigQueryField"] = _BIGQUERYFIELD
DESCRIPTOR.message_types_by_name["EntityId"] = _ENTITYID
DESCRIPTOR.enum_types_by_name["Likelihood"] = _LIKELIHOOD
DESCRIPTOR.enum_types_by_name["FileType"] = _FILETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InfoType = _reflection.GeneratedProtocolMessageType(
"InfoType",
(_message.Message,),
dict(
DESCRIPTOR=_INFOTYPE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Type of information detected by the API.
Attributes:
name:
Name of the information type. Either a name of your choosing
when creating a CustomInfoType, or one of the names listed at
https://cloud.google.com/dlp/docs/infotypes-reference when
specifying a built-in type. InfoType names should conform to
the pattern [a-zA-Z0-9\_]{1,64}.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.InfoType)
),
)
_sym_db.RegisterMessage(InfoType)
StoredType = _reflection.GeneratedProtocolMessageType(
"StoredType",
(_message.Message,),
dict(
DESCRIPTOR=_STOREDTYPE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""A reference to a StoredInfoType to use with scanning.
Attributes:
name:
Resource name of the requested ``StoredInfoType``, for example
``organizations/433245324/storedInfoTypes/432452342`` or
``projects/project-id/storedInfoTypes/432452342``.
create_time:
Timestamp indicating when the version of the
``StoredInfoType`` used for inspection was created. Output-
only field, populated by the system.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.StoredType)
),
)
_sym_db.RegisterMessage(StoredType)
CustomInfoType = _reflection.GeneratedProtocolMessageType(
"CustomInfoType",
(_message.Message,),
dict(
Dictionary=_reflection.GeneratedProtocolMessageType(
"Dictionary",
(_message.Message,),
dict(
WordList=_reflection.GeneratedProtocolMessageType(
"WordList",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMINFOTYPE_DICTIONARY_WORDLIST,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message defining a list of words or phrases to search for in the data.
Attributes:
words:
Words or phrases defining the dictionary. The dictionary must
contain at least one phrase and every phrase must contain at
least 2 characters that are letters or digits. [required]
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.Dictionary.WordList)
),
),
DESCRIPTOR=_CUSTOMINFOTYPE_DICTIONARY,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Custom information type based on a dictionary of words or phrases. This
can be used to match sensitive information specific to the data, such as
a list of employee IDs or job titles.
Dictionary words are case-insensitive and all characters other than
letters and digits in the unicode `Basic Multilingual
Plane <https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane>`__
will be replaced with whitespace when scanning for matches, so the
dictionary phrase "Sam Johnson" will match all three phrases "sam
johnson", "Sam, Johnson", and "Sam (Johnson)". Additionally, the
characters surrounding any match must be of a different type than the
adjacent characters within the word, so letters must be next to
non-letters and digits next to non-digits. For example, the dictionary
word "jen" will match the first three letters of the text "jen123" but
will return no matches for "jennifer".
Dictionary words containing a large number of characters that are not
letters or digits may result in unexpected findings because such
characters are treated as whitespace. The
`limits <https://cloud.google.com/dlp/limits>`__ page contains details
about the size limits of dictionaries. For dictionaries that do not fit
within these constraints, consider using ``LargeCustomDictionaryConfig``
in the ``StoredInfoType`` API.
Attributes:
word_list:
List of words or phrases to search for.
cloud_storage_path:
Newline-delimited file of words in Cloud Storage. Only a
single file is accepted.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.Dictionary)
),
),
Regex=_reflection.GeneratedProtocolMessageType(
"Regex",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMINFOTYPE_REGEX,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message defining a custom regular expression.
Attributes:
pattern:
Pattern defining the regular expression. Its syntax
(https://github.com/google/re2/wiki/Syntax) can be found under
the google/re2 repository on GitHub.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.Regex)
),
),
SurrogateType=_reflection.GeneratedProtocolMessageType(
"SurrogateType",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMINFOTYPE_SURROGATETYPE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message for detecting output from deidentification transformations such
as `CryptoReplaceFfxFpeConfig
</dlp/docs/reference/rest/v2/organizations.deidentifyTemplates#cryptoreplaceffxfpeconfig>`__.
These types of transformations are those that perform pseudonymization,
thereby producing a "surrogate" as output. This should be used in
conjunction with a field on the transformation such as
``surrogate_info_type``. This CustomInfoType does not support the use of
``detection_rules``.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.SurrogateType)
),
),
DetectionRule=_reflection.GeneratedProtocolMessageType(
"DetectionRule",
(_message.Message,),
dict(
Proximity=_reflection.GeneratedProtocolMessageType(
"Proximity",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMINFOTYPE_DETECTIONRULE_PROXIMITY,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message for specifying a window around a finding to apply a detection
rule.
Attributes:
window_before:
Number of characters before the finding to consider.
window_after:
Number of characters after the finding to consider.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.DetectionRule.Proximity)
),
),
LikelihoodAdjustment=_reflection.GeneratedProtocolMessageType(
"LikelihoodAdjustment",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMINFOTYPE_DETECTIONRULE_LIKELIHOODADJUSTMENT,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message for specifying an adjustment to the likelihood of a finding as
part of a detection rule.
Attributes:
fixed_likelihood:
Set the likelihood of a finding to a fixed value.
relative_likelihood:
Increase or decrease the likelihood by the specified number of
levels. For example, if a finding would be ``POSSIBLE``
without the detection rule and ``relative_likelihood`` is 1,
then it is upgraded to ``LIKELY``, while a value of -1 would
downgrade it to ``UNLIKELY``. Likelihood may never drop below
``VERY_UNLIKELY`` or exceed ``VERY_LIKELY``, so applying an
adjustment of 1 followed by an adjustment of -1 when base
likelihood is ``VERY_LIKELY`` will result in a final
likelihood of ``LIKELY``.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustment)
),
),
HotwordRule=_reflection.GeneratedProtocolMessageType(
"HotwordRule",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMINFOTYPE_DETECTIONRULE_HOTWORDRULE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""The rule that adjusts the likelihood of findings within a certain
proximity of hotwords.
Attributes:
hotword_regex:
Regular expression pattern defining what qualifies as a
hotword.
proximity:
Proximity of the finding within which the entire hotword must
reside. The total length of the window cannot exceed 1000
characters. Note that the finding itself will be included in
the window, so that hotwords may be used to match substrings
of the finding itself. For example, the certainty of a phone
number regex "(\d{3}) \d{3}-\d{4}" could be adjusted upwards
if the area code is known to be the local area code of a
company office using the hotword regex "(xxx)", where "xxx"
is the area code in question.
likelihood_adjustment:
Likelihood adjustment to apply to all matching findings.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRule)
),
),
DESCRIPTOR=_CUSTOMINFOTYPE_DETECTIONRULE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Rule for modifying a CustomInfoType to alter behavior under certain
circumstances, depending on the specific details of the rule. Not
supported for the ``surrogate_type`` custom info type.
Attributes:
hotword_rule:
Hotword-based detection rule.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType.DetectionRule)
),
),
DESCRIPTOR=_CUSTOMINFOTYPE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Custom information type provided by the user. Used to find
domain-specific sensitive information configurable to the data in
question.
Attributes:
info_type:
CustomInfoType can either be a new infoType, or an extension
of a built-in infoType, when the name matches one of the
existing infoTypes and that infoType is specified in the
``InspectContent.info_types`` field. Specifying the latter
adds findings to the ones detected by the system. If the
built-in info type is not specified in the
``InspectContent.info_types`` list, then the name is treated
as a custom info type.
likelihood:
Likelihood to return for this CustomInfoType. This base value
can be altered by a detection rule if the finding meets the
criteria specified by the rule. Defaults to ``VERY_LIKELY`` if
not specified.
dictionary:
A list of phrases to detect as a CustomInfoType.
regex:
Regular expression based CustomInfoType.
surrogate_type:
Message for detecting output from deidentification
transformations that support reversing.
stored_type:
Load an existing ``StoredInfoType`` resource for use in
``InspectDataSource``. Not currently supported in
``InspectContent``.
detection_rules:
Set of detection rules to apply to all findings of this
CustomInfoType. Rules are applied in order that they are
specified. Not supported for the ``surrogate_type``
CustomInfoType.
exclusion_type:
If set to EXCLUSION\_TYPE\_EXCLUDE, this infoType will not
cause a finding to be returned. It can still be used for rule
matching.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CustomInfoType)
),
)
_sym_db.RegisterMessage(CustomInfoType)
_sym_db.RegisterMessage(CustomInfoType.Dictionary)
_sym_db.RegisterMessage(CustomInfoType.Dictionary.WordList)
_sym_db.RegisterMessage(CustomInfoType.Regex)
_sym_db.RegisterMessage(CustomInfoType.SurrogateType)
_sym_db.RegisterMessage(CustomInfoType.DetectionRule)
_sym_db.RegisterMessage(CustomInfoType.DetectionRule.Proximity)
_sym_db.RegisterMessage(CustomInfoType.DetectionRule.LikelihoodAdjustment)
_sym_db.RegisterMessage(CustomInfoType.DetectionRule.HotwordRule)
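# Editorial usage sketch (not part of the protoc output). The helper below is only
# defined, never called at import time; the info type names, dictionary words and
# regex patterns are illustrative placeholders, not values mandated by the API.
def _example_custom_info_types():
    """Return two illustrative CustomInfoType messages.

    The first uses the ``dictionary`` arm of the ``type`` oneof; the second uses the
    ``regex`` arm together with a hotword detection rule that raises the likelihood
    of findings near a known area code.
    """
    dictionary_type = CustomInfoType(
        info_type=InfoType(name="EMPLOYEE_ID"),
        dictionary=CustomInfoType.Dictionary(
            word_list=CustomInfoType.Dictionary.WordList(words=["E-1234", "E-5678"])
        ),
    )
    regex_type = CustomInfoType(
        info_type=InfoType(name="CUSTOM_PHONE_NUMBER"),
        regex=CustomInfoType.Regex(pattern="\\(\\d{3}\\) \\d{3}-\\d{4}"),
        detection_rules=[
            CustomInfoType.DetectionRule(
                hotword_rule=CustomInfoType.DetectionRule.HotwordRule(
                    hotword_regex=CustomInfoType.Regex(pattern="\\(555\\)"),
                    proximity=CustomInfoType.DetectionRule.Proximity(window_before=50),
                    likelihood_adjustment=CustomInfoType.DetectionRule.LikelihoodAdjustment(
                        relative_likelihood=1
                    ),
                )
            )
        ],
    )
    return dictionary_type, regex_type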
FieldId = _reflection.GeneratedProtocolMessageType(
"FieldId",
(_message.Message,),
dict(
DESCRIPTOR=_FIELDID,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""General identifier of a data field in a storage service.
Attributes:
name:
Name describing the field.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.FieldId)
),
)
_sym_db.RegisterMessage(FieldId)
PartitionId = _reflection.GeneratedProtocolMessageType(
"PartitionId",
(_message.Message,),
dict(
DESCRIPTOR=_PARTITIONID,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Datastore partition ID. A partition ID identifies a grouping of
entities. The grouping is always by project and namespace; however, the
namespace ID may be empty.
A partition ID contains several dimensions: project ID and namespace ID.
Attributes:
project_id:
The ID of the project to which the entities belong.
namespace_id:
If not empty, the ID of the namespace to which the entities
belong.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.PartitionId)
),
)
_sym_db.RegisterMessage(PartitionId)
KindExpression = _reflection.GeneratedProtocolMessageType(
"KindExpression",
(_message.Message,),
dict(
DESCRIPTOR=_KINDEXPRESSION,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""A representation of a Datastore kind.
Attributes:
name:
The name of the kind.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.KindExpression)
),
)
_sym_db.RegisterMessage(KindExpression)
DatastoreOptions = _reflection.GeneratedProtocolMessageType(
"DatastoreOptions",
(_message.Message,),
dict(
DESCRIPTOR=_DATASTOREOPTIONS,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Options defining a data set within Google Cloud Datastore.
Attributes:
partition_id:
A partition ID identifies a grouping of entities. The grouping
is always by project and namespace; however, the namespace ID
may be empty.
kind:
The kind to process.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.DatastoreOptions)
),
)
_sym_db.RegisterMessage(DatastoreOptions)
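# Editorial usage sketch (not part of the protoc output); the project id and kind
# name below are placeholders.
def _example_datastore_options():
    """Return DatastoreOptions targeting the "Task" kind in a project's default namespace."""
    return DatastoreOptions(
        partition_id=PartitionId(project_id="my-project"),
        kind=KindExpression(name="Task"),
    )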
CloudStorageRegexFileSet = _reflection.GeneratedProtocolMessageType(
"CloudStorageRegexFileSet",
(_message.Message,),
dict(
DESCRIPTOR=_CLOUDSTORAGEREGEXFILESET,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message representing a set of files in a Cloud Storage bucket. Regular
expressions are used to allow fine-grained control over which files in
the bucket to include.
Included files are those that match at least one item in
``include_regex`` and do not match any items in ``exclude_regex``. Note
that a file that matches items from both lists will *not* be included.
For a match to occur, the entire file path (i.e., everything in the url
after the bucket name) must match the regular expression.
For example, given the input
``{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}``:
- ``gs://mybucket/directory1/myfile`` will be included
- ``gs://mybucket/directory1/directory2/myfile`` will be included
(``.*`` matches across ``/``)
- ``gs://mybucket/directory0/directory1/myfile`` will *not* be included
(the full path doesn't match any items in ``include_regex``)
- ``gs://mybucket/directory1/excludedfile`` will *not* be included (the
path matches an item in ``exclude_regex``)
If ``include_regex`` is left empty, it will match all files by default
(this is equivalent to setting ``include_regex: [".*"]``).
Some other common use cases:
- ``{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}`` will
include all files in ``mybucket`` except for .pdf files
- ``{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}``
will include all files directly under ``gs://mybucket/directory/``,
without matching across ``/``
Attributes:
bucket_name:
The name of a Cloud Storage bucket. Required.
include_regex:
A list of regular expressions matching file paths to include.
All files in the bucket that match at least one of these
regular expressions will be included in the set of files,
except for those that also match an item in ``exclude_regex``.
Leaving this field empty will match all files by default (this
is equivalent to including ``.*`` in the list). Regular
expressions use RE2 `syntax
<https://github.com/google/re2/wiki/Syntax>`__; a guide can be
found under the google/re2 repository on GitHub.
exclude_regex:
A list of regular expressions matching file paths to exclude.
All files in the bucket that match at least one of these
regular expressions will be excluded from the scan. Regular
expressions use RE2 `syntax
<https://github.com/google/re2/wiki/Syntax>`__; a guide can be
found under the google/re2 repository on GitHub.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CloudStorageRegexFileSet)
),
)
_sym_db.RegisterMessage(CloudStorageRegexFileSet)
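# Editorial usage sketch (not part of the protoc output); "mybucket" and the regexes
# simply mirror the include/exclude example given in the docstring above.
def _example_cloud_storage_regex_file_set():
    """Return a CloudStorageRegexFileSet scanning directory1/ except excluded files."""
    return CloudStorageRegexFileSet(
        bucket_name="mybucket",
        include_regex=["directory1/.*"],
        exclude_regex=["directory1/excluded.*"],
    )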
CloudStorageOptions = _reflection.GeneratedProtocolMessageType(
"CloudStorageOptions",
(_message.Message,),
dict(
FileSet=_reflection.GeneratedProtocolMessageType(
"FileSet",
(_message.Message,),
dict(
DESCRIPTOR=_CLOUDSTORAGEOPTIONS_FILESET,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Set of files to scan.
Attributes:
url:
The Cloud Storage url of the file(s) to scan, in the format
``gs://<bucket>/<path>``. Trailing wildcard in the path is
allowed. Exactly one of ``url`` or ``regex_file_set`` must be
set.
regex_file_set:
The regex-filtered set of files to scan. Exactly one of
``url`` or ``regex_file_set`` must be set.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CloudStorageOptions.FileSet)
),
),
DESCRIPTOR=_CLOUDSTORAGEOPTIONS,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Options defining a file or a set of files within a Google Cloud Storage
bucket.
Attributes:
file_set:
The set of one or more files to scan.
bytes_limit_per_file:
Max number of bytes to scan from a file. If a scanned file's
size is bigger than this value then the rest of the bytes are
omitted. Only one of bytes\_limit\_per\_file and
bytes\_limit\_per\_file\_percent can be specified.
bytes_limit_per_file_percent:
Max percentage of bytes to scan from a file. The rest are
omitted. The number of bytes scanned is rounded down. Must be
between 0 and 100, inclusive. Both 0 and 100 mean no limit.
Defaults to 0. Only one of bytes\_limit\_per\_file and
bytes\_limit\_per\_file\_percent can be specified.
file_types:
List of file type groups to include in the scan. If empty, all
files are scanned and available data format processors are
applied. In addition, the binary content of the selected files
is always scanned as well.
files_limit_percent:
Limits the number of files to scan to this percentage of the
input FileSet. The number of files scanned is rounded down.
Must be between 0 and 100, inclusive. Both 0 and 100 mean no
limit. Defaults to 0.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CloudStorageOptions)
),
)
_sym_db.RegisterMessage(CloudStorageOptions)
_sym_db.RegisterMessage(CloudStorageOptions.FileSet)
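# Editorial usage sketch (not part of the protoc output); the URL is a placeholder.
# FileSet takes exactly one of ``url`` or ``regex_file_set``; this sketch uses a
# trailing-wildcard URL and caps scanning at 10 percent of each file's bytes.
def _example_cloud_storage_options():
    """Return CloudStorageOptions for gs://mybucket/directory1/* with a per-file byte cap."""
    return CloudStorageOptions(
        file_set=CloudStorageOptions.FileSet(url="gs://mybucket/directory1/*"),
        bytes_limit_per_file_percent=10,
    )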
CloudStorageFileSet = _reflection.GeneratedProtocolMessageType(
"CloudStorageFileSet",
(_message.Message,),
dict(
DESCRIPTOR=_CLOUDSTORAGEFILESET,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message representing a set of files in Cloud Storage.
Attributes:
url:
The url, in the format ``gs://<bucket>/<path>``. Trailing
wildcard in the path is allowed.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CloudStorageFileSet)
),
)
_sym_db.RegisterMessage(CloudStorageFileSet)
CloudStoragePath = _reflection.GeneratedProtocolMessageType(
"CloudStoragePath",
(_message.Message,),
dict(
DESCRIPTOR=_CLOUDSTORAGEPATH,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message representing a single file or path in Cloud Storage.
Attributes:
path:
A url representing a file or path (no wildcards) in Cloud
Storage. Example: gs://[BUCKET\_NAME]/dictionary.txt
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.CloudStoragePath)
),
)
_sym_db.RegisterMessage(CloudStoragePath)
BigQueryOptions = _reflection.GeneratedProtocolMessageType(
"BigQueryOptions",
(_message.Message,),
dict(
DESCRIPTOR=_BIGQUERYOPTIONS,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Options defining BigQuery table and row identifiers.
Attributes:
table_reference:
Complete BigQuery table reference.
identifying_fields:
References to fields uniquely identifying rows within the
table. Nested fields, in the format
``person.birthdate.year``, are allowed.
rows_limit:
Max number of rows to scan. If the table has more rows than
this value, the rest of the rows are omitted. If not set, or
if set to 0, all rows will be scanned. Only one of rows\_limit
and rows\_limit\_percent can be specified. Cannot be used in
conjunction with TimespanConfig.
rows_limit_percent:
Max percentage of rows to scan. The rest are omitted. The
number of rows scanned is rounded down. Must be between 0 and
100, inclusive. Both 0 and 100 mean no limit. Defaults to
0. Only one of rows\_limit and rows\_limit\_percent can be
specified. Cannot be used in conjunction with TimespanConfig.
excluded_fields:
References to fields excluded from scanning. This allows you
to skip inspection of entire columns which you know have no
findings.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.BigQueryOptions)
),
)
_sym_db.RegisterMessage(BigQueryOptions)
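# Editorial usage sketch (not part of the protoc output); the project, dataset and
# table ids are placeholders. BigQueryTable is created further down in this module,
# so the name is resolved when the helper is called, not at import time.
def _example_big_query_options():
    """Return BigQueryOptions that samples 10 percent of the rows of a single table."""
    return BigQueryOptions(
        table_reference=BigQueryTable(
            project_id="my-project", dataset_id="my_dataset", table_id="my_table"
        ),
        identifying_fields=[FieldId(name="row_id")],
        rows_limit_percent=10,
    )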
StorageConfig = _reflection.GeneratedProtocolMessageType(
"StorageConfig",
(_message.Message,),
dict(
TimespanConfig=_reflection.GeneratedProtocolMessageType(
"TimespanConfig",
(_message.Message,),
dict(
DESCRIPTOR=_STORAGECONFIG_TIMESPANCONFIG,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Configuration of the timespan of the items to include in scanning.
Currently only supported when inspecting Google Cloud Storage and
BigQuery.
Attributes:
start_time:
Exclude files or rows older than this value.
end_time:
Exclude files or rows newer than this value. If set to zero,
no upper time limit is applied.
timestamp_field:
Specification of the field containing the timestamp of scanned
items. Used for data sources like Datastore or BigQuery. If
not specified for BigQuery, table last modification timestamp
is checked against given time span. The valid data types of
the timestamp field are: for BigQuery - timestamp, date,
datetime; for Datastore - timestamp. Datastore entity will be
scanned if the timestamp property does not exist or its value
is empty or invalid.
enable_auto_population_of_timespan_config:
When the job is started by a JobTrigger we will automatically
figure out a valid start\_time to avoid scanning files that
have not been modified since the last time the JobTrigger
executed. This will be based on the time of the execution of
the last run of the JobTrigger.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.StorageConfig.TimespanConfig)
),
),
DESCRIPTOR=_STORAGECONFIG,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Shared message indicating Cloud storage type.
Attributes:
datastore_options:
Google Cloud Datastore options specification.
cloud_storage_options:
Google Cloud Storage options specification.
big_query_options:
BigQuery options specification.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.StorageConfig)
),
)
_sym_db.RegisterMessage(StorageConfig)
_sym_db.RegisterMessage(StorageConfig.TimespanConfig)
BigQueryKey = _reflection.GeneratedProtocolMessageType(
"BigQueryKey",
(_message.Message,),
dict(
DESCRIPTOR=_BIGQUERYKEY,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Row key for identifying a record in BigQuery table.
Attributes:
table_reference:
Complete BigQuery table reference.
row_number:
Absolute number of the row from the beginning of the table at
the time of scanning.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.BigQueryKey)
),
)
_sym_db.RegisterMessage(BigQueryKey)
DatastoreKey = _reflection.GeneratedProtocolMessageType(
"DatastoreKey",
(_message.Message,),
dict(
DESCRIPTOR=_DATASTOREKEY,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Record key for a finding in Cloud Datastore.
Attributes:
entity_key:
Datastore entity key.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.DatastoreKey)
),
)
_sym_db.RegisterMessage(DatastoreKey)
Key = _reflection.GeneratedProtocolMessageType(
"Key",
(_message.Message,),
dict(
PathElement=_reflection.GeneratedProtocolMessageType(
"PathElement",
(_message.Message,),
dict(
DESCRIPTOR=_KEY_PATHELEMENT,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""A (kind, ID/name) pair used to construct a key path.
If either name or ID is set, the element is complete. If neither is set,
the element is incomplete.
Attributes:
kind:
The kind of the entity. A kind matching regex ``__.*__`` is
reserved/read-only. A kind must not contain more than 1500
bytes when UTF-8 encoded. Cannot be ``""``.
id_type:
The type of ID.
id:
The auto-allocated ID of the entity. Never equal to zero.
Values less than zero are discouraged and may not be supported
in the future.
name:
The name of the entity. A name matching regex ``__.*__`` is
reserved/read-only. A name must not be more than 1500 bytes
when UTF-8 encoded. Cannot be ``""``.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.Key.PathElement)
),
),
DESCRIPTOR=_KEY,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""A unique identifier for a Datastore entity. If a key's partition ID or
any of its path kinds or names are reserved/read-only, the key is
reserved/read-only. A reserved/read-only key is forbidden in certain
documented contexts.
Attributes:
partition_id:
Entities are partitioned into subsets, currently identified by
a project ID and namespace ID. Queries are scoped to a single
partition.
path:
The entity path. An entity path consists of one or more
elements composed of a kind and a string or numerical
identifier, which identify entities. The first element
identifies a *root entity*, the second element identifies a
*child* of the root entity, the third element identifies a
child of the second entity, and so forth. The entities
identified by all prefixes of the path are called the
element's *ancestors*. A path can never be empty, and a path
can have at most 100 elements.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.Key)
),
)
_sym_db.RegisterMessage(Key)
_sym_db.RegisterMessage(Key.PathElement)
RecordKey = _reflection.GeneratedProtocolMessageType(
"RecordKey",
(_message.Message,),
dict(
DESCRIPTOR=_RECORDKEY,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message for a unique key indicating a record that contains a finding.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.RecordKey)
),
)
_sym_db.RegisterMessage(RecordKey)
BigQueryTable = _reflection.GeneratedProtocolMessageType(
"BigQueryTable",
(_message.Message,),
dict(
DESCRIPTOR=_BIGQUERYTABLE,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message defining the location of a BigQuery table. A table is uniquely
identified by its project\_id, dataset\_id, and table\_name. Within a
query a table is often referenced with a string in the format of:
``<project_id>:<dataset_id>.<table_id>`` or
``<project_id>.<dataset_id>.<table_id>``.
Attributes:
project_id:
The Google Cloud Platform project ID of the project containing
the table. If omitted, project ID is inferred from the API
call.
dataset_id:
Dataset ID of the table.
table_id:
Name of the table.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.BigQueryTable)
),
)
_sym_db.RegisterMessage(BigQueryTable)
BigQueryField = _reflection.GeneratedProtocolMessageType(
"BigQueryField",
(_message.Message,),
dict(
DESCRIPTOR=_BIGQUERYFIELD,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""Message defining a field of a BigQuery table.
Attributes:
table:
Source table of the field.
field:
Designated field in the BigQuery table.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.BigQueryField)
),
)
_sym_db.RegisterMessage(BigQueryField)
EntityId = _reflection.GeneratedProtocolMessageType(
"EntityId",
(_message.Message,),
dict(
DESCRIPTOR=_ENTITYID,
__module__="google.cloud.dlp_v2.proto.storage_pb2",
__doc__="""An entity in a dataset is a field or set of fields that correspond to a
single person. For example, in medical records the ``EntityId`` might be
a patient identifier, or for financial records it might be an account
identifier. This message is used when generalizations or analysis must
take into account that multiple rows correspond to the same entity.
Attributes:
field:
Composite key indicating which field contains the entity
identifier.
""",
# @@protoc_insertion_point(class_scope:google.privacy.dlp.v2.EntityId)
),
)
_sym_db.RegisterMessage(EntityId)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b(
"\n\031com.google.privacy.dlp.v2B\nDlpStorageP\001Z8google.golang.org/genproto/googleapis/privacy/dlp/v2;dlp\252\002\023Google.Cloud.Dlp.V2\312\002\023Google\\Cloud\\Dlp\\V2"
),
)
# @@protoc_insertion_point(module_scope)
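# --- Illustrative usage sketch (not part of the generated module) ------------
# The docstrings above describe sampling knobs such as rows_limit_percent. A
# minimal, hedged example of building a BigQuery scan target with a 10% row
# sample; the project/dataset/table ids below are placeholders, not values
# taken from this file.
_example_big_query_options = BigQueryOptions(
    table_reference=BigQueryTable(
        project_id="example-project",
        dataset_id="example_dataset",
        table_id="example_table",
    ),
    rows_limit_percent=10,
)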
| 34.568476
| 7,053
| 0.631546
|
33912c1486dc80f3475ee446d9407dbce5536cbd
| 569
|
py
|
Python
|
py/1249.minimum-remove-to-make-valid-parentheses.py
|
novate/brusher
|
39728af85655e18ff7c5cb3e2279ac6e8ab3f105
|
[
"MIT"
] | null | null | null |
py/1249.minimum-remove-to-make-valid-parentheses.py
|
novate/brusher
|
39728af85655e18ff7c5cb3e2279ac6e8ab3f105
|
[
"MIT"
] | null | null | null |
py/1249.minimum-remove-to-make-valid-parentheses.py
|
novate/brusher
|
39728af85655e18ff7c5cb3e2279ac6e8ab3f105
|
[
"MIT"
] | null | null | null |
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
stack = []
bad_i = set()
for i, ch in enumerate(s):
if ch == '(':
stack.append((i, ch))
if ch == ')':
if len(stack) > 0:
stack.pop()
else:
bad_i.add(i)
while len(stack) > 0:
i, ch = stack.pop()
bad_i.add(i)
ans = ''
for i in range(len(s)):
if i not in bad_i:
ans += s[i]
return ans
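# Usage sketch added for illustration only; the inputs are arbitrary test
# strings (the second forces every parenthesis to be removed).
if __name__ == '__main__':
    assert Solution().minRemoveToMakeValid("lee(t(c)o)de)") == "lee(t(c)o)de"
    assert Solution().minRemoveToMakeValid("))((") == ""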
| 27.095238
| 50
| 0.370826
|
653945563f0205592e5f0e9e5915d06db870c775
| 3,505
|
py
|
Python
|
cli/create_legacy_catalog.py
|
Michaelliv/functions
|
721ae31506d435bffddac4d5fce53241107e85ac
|
[
"Apache-2.0"
] | null | null | null |
cli/create_legacy_catalog.py
|
Michaelliv/functions
|
721ae31506d435bffddac4d5fce53241107e85ac
|
[
"Apache-2.0"
] | null | null | null |
cli/create_legacy_catalog.py
|
Michaelliv/functions
|
721ae31506d435bffddac4d5fce53241107e85ac
|
[
"Apache-2.0"
] | null | null | null |
import json
from pathlib import Path
from typing import Union
import click as click
import yaml
from mlrun import import_function
from cli.helpers import PROJECT_ROOT
from cli.path_iterator import PathIterator
@click.command()
@click.option(
"-r", "--root-dir", default=PROJECT_ROOT, help="Path to root project directory"
)
def create_legacy_catalog(root_dir: Union[str, Path]):
root_dir = Path(root_dir)
if not root_dir.is_dir():
raise RuntimeError("Root directory must be a directory")
catalog = {}
file_list = Path(root_dir).glob("**/*.yaml")
for file in sorted(file_list, key=lambda f: str(f)):
file = file.resolve()
click.echo(f"Now inspecting file: {file}")
if file.is_file():
try:
fn = import_function(str(file))
except Exception as e:
click.echo(f"failed to load func {file}, {e}")
continue
if not fn.kind or fn.kind in ["", "local", "handler"]:
click.echo(f"illegal function or kind in {file}, kind={fn.kind}")
continue
if fn.metadata.name in catalog:
entry = catalog[fn.metadata.name]
else:
file_dir = file.parent
notebook_iterator = PathIterator(
root=file_dir,
rule=lambda p: p.name.endswith(".ipynb"),
as_path=True,
)
notebooks = list(notebook_iterator)
doc_file = file_dir if not notebooks else file_dir / notebooks[0]
entry = {
"description": fn.spec.description,
"categories": fn.metadata.categories,
"kind": fn.kind,
"docfile": str(doc_file.resolve()),
"versions": {},
}
entry["versions"][fn.metadata.tag or "latest"] = str(file)
print(fn.metadata.name, entry)
catalog[fn.metadata.name] = entry
with open("catalog.yaml", "w") as fp:
yaml.dump(catalog, fp)
with open("catalog.json", "w") as fp:
json.dump(catalog, fp)
mdheader = """# Functions hub
This functions hub is intended to be a centralized location for open source contributions of function components.
These are functions expected to be run as independent mlrun pipeline components, and as public contributions,
it is expected that contributors follow certain guidelines/protocols (please chip-in).
## Functions
"""
with open(root_dir / "README.md", "w") as fp:
fp.write(mdheader)
rows = []
for k, v in catalog.items():
kind = v["kind"]
if kind == "remote":
kind = "nuclio"
row = [
f"[{k}]({v['docfile']})",
kind,
v["description"],
", ".join(v["categories"] or []),
]
rows.append(row)
text = gen_md_table(["function", "kind", "description", "categories"], rows)
fp.write(text)
def gen_md_table(header, rows=None):
rows = [] if rows is None else rows
def gen_list(items=None):
items = [] if items is None else items
out = "|"
for i in items:
out += " {} |".format(i)
return out
out = gen_list(header) + "\n" + gen_list(len(header) * ["---"]) + "\n"
for r in rows:
out += gen_list(r) + "\n"
return out
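def _gen_md_table_example():
    # Hedged illustration, not used by the CLI: the row below is made up rather
    # than a real catalog entry. The call returns:
    #   | function | kind | description | categories |
    #   | --- | --- | --- | --- |
    #   | [demo](demo.ipynb) | job | toy example | utils |
    return gen_md_table(
        ["function", "kind", "description", "categories"],
        [["[demo](demo.ipynb)", "job", "toy example", "utils"]],
    )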
| 31.294643
| 115
| 0.547504
|
a537230e3a580e31af12b6bf30f593f18f291f0e
| 1,613
|
py
|
Python
|
tests/test_interpret.py
|
leroyvn/pinttrs
|
82e0a797078f80ca5db8adc5ff5f5d7db5fcbb8f
|
[
"MIT"
] | 6
|
2021-02-06T15:07:46.000Z
|
2022-01-15T10:26:18.000Z
|
tests/test_interpret.py
|
leroyvn/pinttrs
|
82e0a797078f80ca5db8adc5ff5f5d7db5fcbb8f
|
[
"MIT"
] | 2
|
2021-03-05T15:10:15.000Z
|
2021-07-22T15:24:59.000Z
|
tests/test_interpret.py
|
leroyvn/pinttrs
|
82e0a797078f80ca5db8adc5ff5f5d7db5fcbb8f
|
[
"MIT"
] | null | null | null |
import pint
import pytest
import pinttr
from pinttr import interpret_units
def test_interpret_units():
"""
Unit tests for :func:`pinttrs.interpret_units`.
"""
# We'll use the default unit registry
ureg = pinttr.get_unit_registry()
# fmt: off
# Normal operation: units are applied and the '_units' field is removed
assert interpret_units({"a": 1.0, "a_units": "m"}) == {"a": 1.0 * ureg.m}
# Also works if the key of the magnitude field is an empty string
assert interpret_units({"": 1.0, "_units": "m"}) == {"": 1.0 * ureg.m}
# Also works if the magnitude field key is '_units'
assert interpret_units({
"_units": 1.0,
"_units_units": "m"
}) == {
"_units": 1.0 * ureg.m
}
# If a unit field has no associated magnitude, nothing changes
assert interpret_units({"a_units": 1.,}) == {"a_units": 1.}
assert interpret_units({"_units": "m",}) == {"_units": "m"}
# fmt: on
# If inplace is False, the dict is not modified
d = {"a": 1.0, "a_units": "m"}
assert interpret_units(d) != d
# If inplace is True, the dict is modified
interpret_units(d, inplace=True)
assert d == {"a": 1.0 * ureg.m}
# Corner cases
# -- If magnitude entry is already a Pint quantity, conversion is performed ...
d = interpret_units({"a": 1.0 * ureg.m, "a_units": "km"})
assert d == {"a": 1.0 * ureg.m}
assert d["a"].units == ureg.km
# -- ... and will fail if incompatible units are used
with pytest.raises(pint.DimensionalityError):
interpret_units({"a": 1.0 * ureg.s, "a_units": "m"})
| 33.604167
| 83
| 0.608803
|
4f81bda979952f906980f87560440661b2ca53a0
| 6,392
|
py
|
Python
|
3.Tasks/Dialog-RL-GAN/gen/gen_data.py
|
imageslr/NLP
|
f56796a86620accd487480e5c3bd992cf3dc7578
|
[
"MIT"
] | 2
|
2019-03-25T03:10:02.000Z
|
2020-07-05T22:05:44.000Z
|
3.Tasks/Dialog-RL-GAN/gen/gen_data.py
|
imageslr/NLP
|
f56796a86620accd487480e5c3bd992cf3dc7578
|
[
"MIT"
] | null | null | null |
3.Tasks/Dialog-RL-GAN/gen/gen_data.py
|
imageslr/NLP
|
f56796a86620accd487480e5c3bd992cf3dc7578
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os
import random
import sys
from six.moves import xrange
import numpy as np
from tensorflow.python.platform import gfile
import utils.data_utils as data_utils
from utils.utils import just_message as just
def get_dataset(gen_config):
"""
    Fetch the training data.
:return: vocab, rev_vocab, dev_set, train_set
"""
train_path = os.path.join(gen_config.train_dir, "chitchat.train")
voc_file_path = [train_path + ".answer", train_path + ".query"]
vocab_path = os.path.join(gen_config.train_dir, "vocab%d.all" % gen_config.vocab_size)
data_utils.create_vocabulary(vocab_path, voc_file_path, gen_config.vocab_size)
vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path) # {dog: 0, cat: 1} [dog, cat]
print(just("Preparing Chitchat gen_data in %s" % gen_config.train_dir))
train_query, train_answer, dev_query, dev_answer = data_utils.prepare_chitchat_data(
gen_config.train_dir, vocab, gen_config.vocab_size)
# Read disc_data into buckets and compute their sizes.
print(just("Reading development and training gen_data (limit: %d)."
% gen_config.max_train_data_size))
dev_set = read_data(gen_config, dev_query, dev_answer)
train_set = read_data(gen_config, train_query, train_answer, gen_config.max_train_data_size)
return vocab, rev_vocab, dev_set, train_set
def read_data(config, source_path, target_path, max_size=None):
"""
    Read the data, bucketing examples by the lengths of the source and target sequences.
    Return value: data_set: [bucket1, bucket2, ...]; each bucket is a list whose elements are [source_ids, target_ids] pairs.
:param config:
:param source_path:
:param target_path:
:param max_size:
:return:
"""
data_set = [[] for _ in config.buckets]
with gfile.GFile(source_path, mode="r") as source_file:
with gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0:
print(" reading disc_data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(config.buckets): #[bucket_id, (source_size, target_size)]
if len(source_ids) < source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def get_batch(model, train_data, bucket_id, batch_size, type=0):
"""
    Get a random batch from the specified bucket for training with step(..). step(..) expects time-major data.
Args:
      train_data: a list with one entry per bucket; each entry is a list of (query, answer) pairs
bucket_id: integer, which bucket to get the batch for.
      type: - 0: the normal case, fetch data for pre-training
            - 1: TODO apparently unused
            - 2: discriminator training data for adversarial training, also apparently unused
Returns:
(batch_encoder_inputs, batch_decoder_inputs, batch_weights, batch_source_encoder, batch_source_decoder)
      in order: (time-major, time-major, time-major, batch-major, batch-major)
"""
encoder_size, decoder_size = model.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
    # Get a random batch of encoder and decoder inputs from data; in practice type is always 0, i.e. a random pair is chosen from the bucket,
# pad them if needed, reverse encoder inputs and add GO to decoder.
batch_source_encoder, batch_source_decoder = [], []
if type == 1:
batch_size = 1
for batch_i in xrange(batch_size):
        if type == 1: # return all data in the bucket
encoder_input, decoder_input = train_data[bucket_id]
        elif type == 2: # take the first pair in the bucket; encoder_input is the batch_i-th word, so the encoder has only one word # TODO but it is indexed like an array below, so this raises an error
# print("disc_data[bucket_id]: ", disc_data[bucket_id][0])
encoder_input_a, decoder_input = train_data[bucket_id][0]
encoder_input = encoder_input_a[batch_i]
        elif type == 0: # pick a random pair from the bucket
encoder_input, decoder_input = random.choice(train_data[bucket_id])
# print("train en: %s, de: %s" %(encoder_input, decoder_input))
batch_source_encoder.append(encoder_input)
batch_source_decoder.append(decoder_input)
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the disc_data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(batch_size, dtype=np.float32)
for batch_idx in xrange(batch_size):
            # We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return (batch_encoder_inputs, batch_decoder_inputs, batch_weights, batch_source_encoder, batch_source_decoder)
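def _time_major_example():
    # Hedged sketch, not called by the training code: shows the batch-major ->
    # time-major re-indexing that get_batch performs; the toy token ids are
    # arbitrary.
    decoder_inputs = [[1, 2, 3], [4, 5, 6]]  # batch_size = 2, decoder_size = 3
    batch_size, decoder_size = 2, 3
    time_major = [np.array([decoder_inputs[b][t] for b in xrange(batch_size)],
                           dtype=np.int32)
                  for t in xrange(decoder_size)]
    # time_major == [array([1, 4]), array([2, 5]), array([3, 6])]
    return time_major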
| 45.985612
| 128
| 0.672403
|
52f956dee65edb9a6b6790cd29430b84bb4582e5
| 1,682
|
py
|
Python
|
landtile/mesh.py
|
take9i/landscapes_builder
|
f4738730af345b073143a0f332f8fc1e930fdee5
|
[
"MIT"
] | null | null | null |
landtile/mesh.py
|
take9i/landscapes_builder
|
f4738730af345b073143a0f332f8fc1e930fdee5
|
[
"MIT"
] | null | null | null |
landtile/mesh.py
|
take9i/landscapes_builder
|
f4738730af345b073143a0f332f8fc1e930fdee5
|
[
"MIT"
] | 1
|
2021-11-07T07:04:33.000Z
|
2021-11-07T07:04:33.000Z
|
# mesh utilities
import os
import json
import numpy as np
from stl import mesh
def build_mesh(vertices, faces):
vs = np.array(vertices)
fs = np.array(faces)
hedron = mesh.Mesh(np.zeros(len(faces), dtype=mesh.Mesh.dtype))
for i, f in enumerate(fs):
for j in range(3):
hedron.vectors[i][j] = vs[f[j],:]
return hedron
def merge_meshes(meshes):
return mesh.Mesh(np.concatenate([m.data.copy() for m in meshes]))
def write_obj(meshes, materials, path):
base = os.path.splitext(os.path.basename(path))[0]
with open(path, 'w') as fd:
print = lambda s: fd.write(s + '\n')
print('# take9 handwrite obj file:')
print('mtllib common.mtl')
iv, inv = 1, 1
for mesh, material in zip(meshes, materials):
mesh.update_normals()
print(f'o {base}_{iv}')
print(f'usemtl {material}')
for vs in mesh.vectors:
for x, y, z in vs:
print(f'v {x} {z} {-y}')
for x, y, z in mesh.normals:
print(f'vn {x} {z} {-y}')
print('s off')
for i, vs in enumerate(mesh.vectors):
j, k = i * 3 + iv, i + inv
print(f'f {j}//{k} {j+1}//{k} {j+2}//{k}')
iv += len(mesh.vectors) * 3
inv += len(mesh.vectors)
def write_batchtable(properties, path):
batchtable = {}
batchtable['batchId'] = [i for i, _ in enumerate(properties)]
for k in properties[0].keys():
batchtable[k] = [p[k] for p in properties]
with open(path, 'wb') as fd:
fd.write(json.dumps(batchtable, indent=2, ensure_ascii=False).encode('utf-8'))
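def _example_single_triangle():
    # Hedged usage sketch, not called anywhere: builds one right triangle with
    # build_mesh and duplicates it via merge_meshes; the coordinates are
    # arbitrary placeholders.
    vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
    faces = [[0, 1, 2]]
    tri = build_mesh(vertices, faces)
    return merge_meshes([tri, tri])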
| 34.326531
| 86
| 0.548157
|
04ce23ed2c4b638b3bf917c8a70b8615d6770c5f
| 6,222
|
py
|
Python
|
shooting.py
|
jillianchiam/pipulp98j
|
154a4b54fdfbb680a7786d99fa2c2549d1b548f0
|
[
"bzip2-1.0.6"
] | null | null | null |
shooting.py
|
jillianchiam/pipulp98j
|
154a4b54fdfbb680a7786d99fa2c2549d1b548f0
|
[
"bzip2-1.0.6"
] | null | null | null |
shooting.py
|
jillianchiam/pipulp98j
|
154a4b54fdfbb680a7786d99fa2c2549d1b548f0
|
[
"bzip2-1.0.6"
] | null | null | null |
#1 - Import library
import pygame, sys
import os
import math
import random
from pygame.locals import *
#2 - Initialize game
pygame.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False, False]
playerpos = [150, 100]
acc=[0,0]
hats=[]
pygame.display.set_caption('THIS IS WAR!')
#2.1 - add the bad guys: decrease the
#badtimer every frame until it reaches zero, then spawn a new coyote
badtimer=100
badtimer1=0
coyotes=[[640,100]]
healthvalue=194
#3 - load images
current_path = os.path.dirname(r'''C:\Users\jilli\AppData\Local\Programs\Python\Python36\shooting.py''') # Where your .py file is located
resource_path = os.path.join(current_path, 'resources') # The resource folder path
image_path = os.path.join(resource_path, 'images') # The image folder path
player = pygame.image.load(os.path.join(image_path, 'perry.png'))
grass = pygame.image.load(os.path.join(image_path, 'grass.png'))
sunflower = pygame.image.load(os.path.join(image_path, 'sunflower.png'))
hat = pygame.image.load(os.path.join(image_path, 'perryhat.png'))
coyoteimg1 = pygame.image.load(os.path.join(image_path, 'coyote.png'))
coyoteimg = coyoteimg1
badguyimg1 = pygame.image.load(os.path.join(image_path,'badguy.png'))
badguyimg=badguyimg1
#4 - Loop through game so it doesn't halt
while True:
badtimer = badtimer - 1
#5 - clears the screen before drawing it again
screen.fill(0)
    #6 - draw screen elements (draw the background before the player so the player appears above it)
for x in range(width//grass.get_width()+1): # range() can only work with integers, but dividing
#with the / operator always results in a float value
for y in range(height//grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
screen.blit(sunflower,(0,30))
screen.blit(sunflower,(0,135))
screen.blit(sunflower,(0,240))
screen.blit(sunflower,(0,345 ))
position = pygame.mouse.get_pos()
angle = math.atan2(position[1]-(playerpos[1]+32), position[0]-(playerpos[0]+26))
playerrotates = pygame.transform.rotate(player, 360-angle*57.29)
playerpos1 = (playerpos[0]-playerrotates.get_rect().width/2, playerpos[1]-playerrotates.get_rect().height/2)
screen.blit(playerrotates, playerpos1)
# 6.2 - Draw hats
for perryhat in hats:
index=0
velx = math.cos(perryhat[0])*10 #10 is the speed of the arrow
vely = math.sin(perryhat[0])*10
perryhat[1] = perryhat[1] + velx
perryhat[2] = perryhat[2] + vely
        if perryhat[1] < -64 or perryhat[1] > 640 or perryhat[2] < -64 or perryhat[2] > 480:
hats.pop(index) #If no index is specified, a.pop() removes and
# returns the last item in the list.
index = index + 1
for projectile in hats:
hats1 = pygame.transform.rotate(hat, 360-projectile[0]*57.29) # multiply radians by approximately 57.29 or 360/2π
screen.blit(hats1, (projectile[1], projectile[2]))
#6.3 - Draw coyotes
if badtimer==0:
coyotes.append([640, random.randint(50,430)])
badtimer=100-(badtimer1*2)
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for coyote in coyotes:
if coyote[0]<-64:
coyotes.pop(index)
coyote[0]-=7
index+=1
for coyote in coyotes:
screen.blit(coyoteimg, coyote)
# 6.3.1 - Attack sunflowers
badrect=pygame.Rect(coyoteimg.get_rect())
badrect.top=coyote[1]
badrect.left=coyote[0]
if badrect.left<64:
healthvalue -= random.randint(5,20)
coyotes.pop(index)
#6.3.2 - check for collisions
index1 = 0
for perryhat in hats: #rect here store rectangular coordinates
hatrect = pygame.Rect(hat.get_rect())
hatrect.left=perryhat[1]
hatrect.top=perryhat[2]
if badrect.colliderect(hatrect):
acc[0]+=1
coyotes.pop(index) # pop() removes and returns last object or obj from the list
hats.pop(index1)
index1 += 1
        #6.3.3 - placing next bad guy into screen
index += 1
for coyote in coyotes:
screen.blit(coyoteimg, coyote)
#7 - update the screen
pygame.display.flip() # Update the full display Surface to the screen
for event in pygame.event.get(): #event is for actions made by user
#like pressing a key
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#8 - Keys!
if event.type == pygame.KEYDOWN:
if event.key==K_w:
keys[0]=True
elif event.key==K_a:
keys[1]=True
elif event.key==K_s:
keys[2]=True
elif event.key==K_d:
keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
keys[0]=False
elif event.key==pygame.K_a:
keys[1]=False
elif event.key==pygame.K_s:
keys[2]=False
elif event.key==pygame.K_d:
keys[3]=False
if event.type==pygame.MOUSEBUTTONDOWN:
position=pygame.mouse.get_pos()
acc[1]+=1
hats.append([math.atan2(position[1]-(playerpos1[1]+32),
position[0]-(playerpos1[0]+26)),
playerpos1[0]+32,
playerpos1[1]+32])
#9 - Move player
    if keys[0]:
        playerpos[1] = playerpos[1] - 5
    elif keys[1]:
        playerpos[0] = playerpos[0] - 5
    elif keys[2]:
        playerpos[1] = playerpos[1] + 5
    elif keys[3]:
        playerpos[0] = playerpos[0] + 5
| 33.632432
| 138
| 0.565092
|
a34d414ac0bb01c0e435ae91466f7dd1017847d0
| 949
|
py
|
Python
|
userbot/plugins/bc.py
|
Rahulchoudhary17/JaaduBot
|
80719d9ec779306b2648e49a4621ec68ef42b4be
|
[
"MIT"
] | 23
|
2020-06-20T09:02:59.000Z
|
2020-11-29T12:01:37.000Z
|
userbot/plugins/bc.py
|
madhav2726/JaaduBot
|
3716d329d5e669ee59a154e170a8f907d38aa6db
|
[
"MIT"
] | null | null | null |
userbot/plugins/bc.py
|
madhav2726/JaaduBot
|
3716d329d5e669ee59a154e170a8f907d38aa6db
|
[
"MIT"
] | 128
|
2020-06-20T09:03:21.000Z
|
2021-11-16T07:15:40.000Z
|
#JaaduBot Exclusive
#Give Credits if you copy
from telethon import events
import random, re
from uniborg.util import admin_cmd
RUNSREACTS = [
"`Tufaano Mein Chhatri Nahi Kholi Jaati; Bra Se Pehle Panty Nahi Kholi Jaati; ‘Viagra’ Khana Shuru Kar, Mere Dost; Kyunki Zubaan Aur Ungli Se Aurat Nahi Chodi Jaati!`",
"`Ranger tumara baap tha hai aur rahega smje`",
"`Yaad Mein Unki Kiya Loota Diya..Itni Mari Muthh Ke Topa Sujaa Diya..Hume Kamzor Hota Dekh Jab Muskurayi Woh…….Unki Khushi Dekh Kar Ek Baar Fhir Hila Diya..`",
"` Lund Toh Choot Ki Baddi Shaan Hoti Hai; Lund Bina Choot Bhi Shamshaan Hoti Hai; Yeh Zindgi Ke Saare Khel Toh Lund Ke Hein; Warna Choot Aur Gaand Ek Samaan Hoti Hai!`",
]
@borg.on(admin_cmd(pattern="bc"))
async def _(event):
if event.fwd_from:
return
bro = random.randint(0, len(RUNSREACTS) - 1)
reply_text = RUNSREACTS[bro]
await event.edit(reply_text)
| 43.136364
| 175
| 0.701791
|
6599144aa4b3561da2e6bc20809025cc71ad6c35
| 13,649
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/kusto/v20200918/get_cluster.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/kusto/v20200918/get_cluster.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/kusto/v20200918/get_cluster.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
]
@pulumi.output_type
class GetClusterResult:
"""
Class representing a Kusto cluster.
"""
def __init__(__self__, data_ingestion_uri=None, enable_disk_encryption=None, enable_double_encryption=None, enable_purge=None, enable_streaming_ingest=None, engine_type=None, identity=None, key_vault_properties=None, language_extensions=None, location=None, name=None, optimized_autoscale=None, provisioning_state=None, sku=None, state=None, state_reason=None, tags=None, trusted_external_tenants=None, type=None, uri=None, virtual_network_configuration=None, zones=None):
if data_ingestion_uri and not isinstance(data_ingestion_uri, str):
raise TypeError("Expected argument 'data_ingestion_uri' to be a str")
pulumi.set(__self__, "data_ingestion_uri", data_ingestion_uri)
if enable_disk_encryption and not isinstance(enable_disk_encryption, bool):
raise TypeError("Expected argument 'enable_disk_encryption' to be a bool")
pulumi.set(__self__, "enable_disk_encryption", enable_disk_encryption)
if enable_double_encryption and not isinstance(enable_double_encryption, bool):
raise TypeError("Expected argument 'enable_double_encryption' to be a bool")
pulumi.set(__self__, "enable_double_encryption", enable_double_encryption)
if enable_purge and not isinstance(enable_purge, bool):
raise TypeError("Expected argument 'enable_purge' to be a bool")
pulumi.set(__self__, "enable_purge", enable_purge)
if enable_streaming_ingest and not isinstance(enable_streaming_ingest, bool):
raise TypeError("Expected argument 'enable_streaming_ingest' to be a bool")
pulumi.set(__self__, "enable_streaming_ingest", enable_streaming_ingest)
if engine_type and not isinstance(engine_type, str):
raise TypeError("Expected argument 'engine_type' to be a str")
pulumi.set(__self__, "engine_type", engine_type)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if key_vault_properties and not isinstance(key_vault_properties, dict):
raise TypeError("Expected argument 'key_vault_properties' to be a dict")
pulumi.set(__self__, "key_vault_properties", key_vault_properties)
if language_extensions and not isinstance(language_extensions, dict):
raise TypeError("Expected argument 'language_extensions' to be a dict")
pulumi.set(__self__, "language_extensions", language_extensions)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if optimized_autoscale and not isinstance(optimized_autoscale, dict):
raise TypeError("Expected argument 'optimized_autoscale' to be a dict")
pulumi.set(__self__, "optimized_autoscale", optimized_autoscale)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if state_reason and not isinstance(state_reason, str):
raise TypeError("Expected argument 'state_reason' to be a str")
pulumi.set(__self__, "state_reason", state_reason)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trusted_external_tenants and not isinstance(trusted_external_tenants, list):
raise TypeError("Expected argument 'trusted_external_tenants' to be a list")
pulumi.set(__self__, "trusted_external_tenants", trusted_external_tenants)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if uri and not isinstance(uri, str):
raise TypeError("Expected argument 'uri' to be a str")
pulumi.set(__self__, "uri", uri)
if virtual_network_configuration and not isinstance(virtual_network_configuration, dict):
raise TypeError("Expected argument 'virtual_network_configuration' to be a dict")
pulumi.set(__self__, "virtual_network_configuration", virtual_network_configuration)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="dataIngestionUri")
def data_ingestion_uri(self) -> str:
"""
The cluster data ingestion URI.
"""
return pulumi.get(self, "data_ingestion_uri")
@property
@pulumi.getter(name="enableDiskEncryption")
def enable_disk_encryption(self) -> Optional[bool]:
"""
A boolean value that indicates if the cluster's disks are encrypted.
"""
return pulumi.get(self, "enable_disk_encryption")
@property
@pulumi.getter(name="enableDoubleEncryption")
def enable_double_encryption(self) -> Optional[bool]:
"""
A boolean value that indicates if double encryption is enabled.
"""
return pulumi.get(self, "enable_double_encryption")
@property
@pulumi.getter(name="enablePurge")
def enable_purge(self) -> Optional[bool]:
"""
A boolean value that indicates if the purge operations are enabled.
"""
return pulumi.get(self, "enable_purge")
@property
@pulumi.getter(name="enableStreamingIngest")
def enable_streaming_ingest(self) -> Optional[bool]:
"""
A boolean value that indicates if the streaming ingest is enabled.
"""
return pulumi.get(self, "enable_streaming_ingest")
@property
@pulumi.getter(name="engineType")
def engine_type(self) -> Optional[str]:
"""
The engine type
"""
return pulumi.get(self, "engine_type")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the cluster, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="keyVaultProperties")
def key_vault_properties(self) -> Optional['outputs.KeyVaultPropertiesResponse']:
"""
KeyVault properties for the cluster encryption.
"""
return pulumi.get(self, "key_vault_properties")
@property
@pulumi.getter(name="languageExtensions")
def language_extensions(self) -> 'outputs.LanguageExtensionsListResponse':
"""
List of the cluster's language extensions.
"""
return pulumi.get(self, "language_extensions")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="optimizedAutoscale")
def optimized_autoscale(self) -> Optional['outputs.OptimizedAutoscaleResponse']:
"""
Optimized auto scale definition.
"""
return pulumi.get(self, "optimized_autoscale")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> 'outputs.AzureSkuResponse':
"""
The SKU of the cluster.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def state(self) -> str:
"""
The state of the resource.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateReason")
def state_reason(self) -> str:
"""
The reason for the cluster's current state.
"""
return pulumi.get(self, "state_reason")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trustedExternalTenants")
def trusted_external_tenants(self) -> Optional[Sequence['outputs.TrustedExternalTenantResponse']]:
"""
The cluster's external tenants.
"""
return pulumi.get(self, "trusted_external_tenants")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def uri(self) -> str:
"""
The cluster URI.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter(name="virtualNetworkConfiguration")
def virtual_network_configuration(self) -> Optional['outputs.VirtualNetworkConfigurationResponse']:
"""
Virtual network definition.
"""
return pulumi.get(self, "virtual_network_configuration")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
The availability zones of the cluster.
"""
return pulumi.get(self, "zones")
class AwaitableGetClusterResult(GetClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClusterResult(
data_ingestion_uri=self.data_ingestion_uri,
enable_disk_encryption=self.enable_disk_encryption,
enable_double_encryption=self.enable_double_encryption,
enable_purge=self.enable_purge,
enable_streaming_ingest=self.enable_streaming_ingest,
engine_type=self.engine_type,
identity=self.identity,
key_vault_properties=self.key_vault_properties,
language_extensions=self.language_extensions,
location=self.location,
name=self.name,
optimized_autoscale=self.optimized_autoscale,
provisioning_state=self.provisioning_state,
sku=self.sku,
state=self.state,
state_reason=self.state_reason,
tags=self.tags,
trusted_external_tenants=self.trusted_external_tenants,
type=self.type,
uri=self.uri,
virtual_network_configuration=self.virtual_network_configuration,
zones=self.zones)
def get_cluster(cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
"""
Use this data source to access information about an existing resource.
:param str cluster_name: The name of the Kusto cluster.
:param str resource_group_name: The name of the resource group containing the Kusto cluster.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:kusto/v20200918:getCluster', __args__, opts=opts, typ=GetClusterResult).value
return AwaitableGetClusterResult(
data_ingestion_uri=__ret__.data_ingestion_uri,
enable_disk_encryption=__ret__.enable_disk_encryption,
enable_double_encryption=__ret__.enable_double_encryption,
enable_purge=__ret__.enable_purge,
enable_streaming_ingest=__ret__.enable_streaming_ingest,
engine_type=__ret__.engine_type,
identity=__ret__.identity,
key_vault_properties=__ret__.key_vault_properties,
language_extensions=__ret__.language_extensions,
location=__ret__.location,
name=__ret__.name,
optimized_autoscale=__ret__.optimized_autoscale,
provisioning_state=__ret__.provisioning_state,
sku=__ret__.sku,
state=__ret__.state,
state_reason=__ret__.state_reason,
tags=__ret__.tags,
trusted_external_tenants=__ret__.trusted_external_tenants,
type=__ret__.type,
uri=__ret__.uri,
virtual_network_configuration=__ret__.virtual_network_configuration,
zones=__ret__.zones)
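def _example_usage():
    # Hedged usage sketch, not part of the generated SDK: the cluster and
    # resource group names are placeholders; inside an actual Pulumi program
    # the lookup and exports would look roughly like this.
    result = get_cluster(cluster_name="my-kusto-cluster",
                         resource_group_name="my-resource-group")
    pulumi.export("kustoUri", result.uri)
    pulumi.export("kustoState", result.state)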
| 40.262537
| 476
| 0.671185
|
13bd497e51bfaca0c9218dd73bbc517fceed1b4d
| 7,538
|
py
|
Python
|
flexget/components/archives/decompress.py
|
ksurl/flexget
|
a54dc25780b62f80d2e638278277a945428ffd05
|
[
"MIT"
] | null | null | null |
flexget/components/archives/decompress.py
|
ksurl/flexget
|
a54dc25780b62f80d2e638278277a945428ffd05
|
[
"MIT"
] | 13
|
2022-03-28T03:25:30.000Z
|
2022-03-28T10:25:44.000Z
|
flexget/components/archives/decompress.py
|
aidan-/Flexget
|
5622436a412918ef204c51e9f984cd9fe784ea7c
|
[
"MIT"
] | null | null | null |
import os
import re
from loguru import logger
from flexget import plugin
from flexget.components.archives import utils as archiveutil
from flexget.event import event
from flexget.utils.template import RenderError, render_from_entry
logger = logger.bind(name='decompress')
def fail_entry_with_error(entry, error):
"""
Log error message at error level and fail the entry
"""
logger.error(error)
entry.fail(error)
def open_archive_entry(entry):
"""
Convenience function for opening archives from entries. Returns an archive.Archive object
"""
archive_path = entry.get('location', '')
if not archive_path:
logger.error('Entry does not appear to represent a local file.')
return
if not os.path.exists(archive_path):
logger.error('File no longer exists: {}', entry['location'])
return
try:
archive = archiveutil.open_archive(archive_path)
except archiveutil.BadArchive as error:
fail_entry_with_error(entry, 'Bad archive: %s (%s)' % (archive_path, error))
except archiveutil.NeedFirstVolume:
logger.error('Not the first volume: {}', archive_path)
except archiveutil.ArchiveError as error:
fail_entry_with_error(entry, 'Failed to open Archive: %s (%s)' % (archive_path, error))
else:
return archive
def get_output_path(to, entry):
"""Determine which path to output to"""
try:
if to:
return render_from_entry(to, entry)
else:
return os.path.dirname(entry.get('location'))
except RenderError:
raise plugin.PluginError('Could not render path: %s' % to)
def extract_info(info, archive, to, keep_dirs, test=False):
"""Extract ArchiveInfo object"""
destination = get_destination_path(info, to, keep_dirs)
if test:
logger.info('Would extract: {} to {}', info.filename, destination)
return
logger.debug('Attempting to extract: {} to {}', info.filename, destination)
try:
info.extract(archive, destination)
except archiveutil.FSError as error:
logger.error('OS error while creating file: {} ({})', destination, error)
except archiveutil.FileAlreadyExists as error:
logger.warning('File already exists: {}', destination)
except archiveutil.ArchiveError as error:
logger.error('Failed to extract file: {} from {} ({})', info.filename, archive.path, error)
def get_destination_path(info, to, keep_dirs):
"""Generate the destination path for a given file"""
path_suffix = info.path if keep_dirs else os.path.basename(info.path)
return os.path.join(to, path_suffix)
def is_match(info, pattern):
"""Returns whether an info record matches the supplied regex"""
match = re.compile(pattern, re.IGNORECASE).match
is_match = bool(match(info.filename))
if is_match:
logger.debug('Found matching file: {}', info.filename)
else:
logger.debug('File did not match regexp: {}', info.filename)
return is_match
class Decompress:
r"""
Extracts files from Zip or RAR archives. By default this plugin will extract to the same
directory as the source archive, preserving directory structure from the archive.
This plugin requires the rarfile Python module and unrar command line utility to extract RAR
archives.
Configuration:
to: Destination path; supports Jinja2 templating on the input entry. Fields such
as series_name must be populated prior to input into this plugin using
metainfo_series or similar. If no path is specified, archive contents will
    be extracted in the same directory as the archive itself.
keep_dirs: [yes|no] (default: yes) Indicates whether to preserve the directory
structure from within the archive in the destination path.
mask: Shell-style file mask; any matching files will be extracted. When used, this
field will override regexp.
regexp: Regular expression pattern; any matching files will be extracted. Overridden
by mask if specified.
unrar_tool: Specifies the path of the unrar tool. Only necessary if its location is not
defined in the operating system's PATH environment variable.
delete_archive: [yes|no] (default: no) Delete this archive after extraction is completed.
Example:
decompress:
to: '/Volumes/External/TV/{{series_name}}/Season {{series_season}}/'
keep_dirs: yes
regexp: '.*s\d{1,2}e\d{1,2}.*\.mkv'
"""
schema = {
'anyOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'to': {'type': 'string'},
'keep_dirs': {'type': 'boolean'},
'mask': {'type': 'string'},
'regexp': {'type': 'string', 'format': 'regex'},
'unrar_tool': {'type': 'string'},
'delete_archive': {'type': 'boolean'},
},
'additionalProperties': False,
},
]
}
@staticmethod
def prepare_config(config):
"""Prepare config for processing"""
from fnmatch import translate
if not isinstance(config, dict):
config = {}
config.setdefault('to', '')
config.setdefault('keep_dirs', True)
config.setdefault('unrar_tool', '')
config.setdefault('delete_archive', False)
# If mask was specified, turn it in to a regexp
if 'mask' in config:
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
if 'regexp' not in config:
config['regexp'] = '.'
return config
@staticmethod
def handle_entry(entry, config, test=False):
"""
Extract matching files into the directory specified
Optionally delete the original archive if config.delete_archive is True
"""
archive = open_archive_entry(entry)
if not archive:
return
to = get_output_path(config['to'], entry)
for info in archive.infolist():
if is_match(info, config['regexp']):
extract_info(info, archive, to, config['keep_dirs'], test=test)
if config['delete_archive']:
if not test:
archive.delete()
else:
logger.info(f'Would delete archive {archive.path}')
archive.close()
else:
archive.close()
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_start(self, task, config):
try:
archiveutil.RarArchive.check_import()
except archiveutil.NeedRarFile as e:
raise plugin.PluginError(e)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_output(self, task, config):
"""Task handler for archive_extract"""
if isinstance(config, bool) and not config:
return
config = self.prepare_config(config)
archiveutil.rarfile_set_tool_path(config)
archiveutil.rarfile_set_path_sep(os.path.sep)
for entry in task.accepted:
self.handle_entry(entry, config, test=task.options.test)
@event('plugin.register')
def register_plugin():
plugin.register(Decompress, 'decompress', api_ver=2)
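def _prepare_config_example():
    # Hedged sketch, not registered with FlexGet: shows how a shell-style mask
    # is normalised into the regexp that handle_entry matches against; the mask
    # value is illustrative only.
    config = Decompress.prepare_config({'mask': '*.mkv'})
    # config['regexp'] == fnmatch.translate('*.mkv'), and config['keep_dirs']
    # defaults to True.
    return config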
| 33.954955
| 100
| 0.622048
|
37f8aff4c2cb47e4ca13d27f0dfd26b60122bdf4
| 3,691
|
py
|
Python
|
sandbox/rocky/tf/distributions/categorical.py
|
keuntaeklee/rllab
|
d2826839ee2c3ff08c78a5ddf5fbb61373987434
|
[
"MIT"
] | null | null | null |
sandbox/rocky/tf/distributions/categorical.py
|
keuntaeklee/rllab
|
d2826839ee2c3ff08c78a5ddf5fbb61373987434
|
[
"MIT"
] | null | null | null |
sandbox/rocky/tf/distributions/categorical.py
|
keuntaeklee/rllab
|
d2826839ee2c3ff08c78a5ddf5fbb61373987434
|
[
"MIT"
] | null | null | null |
import numpy as np
from .base import Distribution
import tensorflow as tf
from sandbox.rocky.tf.misc import tensor_utils
TINY = 1e-8
def from_onehot(x_var):
ret = np.zeros((len(x_var),), 'int32')
nonzero_n, nonzero_a = np.nonzero(x_var)
ret[nonzero_n] = nonzero_a
return ret
class Categorical(Distribution):
def __init__(self, dim):
self._dim = dim
weights_var = tf.compat.v1.placeholder(
dtype=tf.float32,
shape=(None, dim),
name="weights"
)
self._f_sample = tensor_utils.compile_function(
inputs=[weights_var],
outputs=tf.random.categorical(logits=tf.math.log(weights_var + 1e-8), num_samples=1)[:, 0],
)
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
"""
Compute the symbolic KL divergence of two categorical distributions
"""
old_prob_var = old_dist_info_vars["prob"]
new_prob_var = new_dist_info_vars["prob"]
ndims = old_prob_var.get_shape().ndims
# Assume layout is N * A
return tf.reduce_sum(
input_tensor=old_prob_var * (tf.math.log(old_prob_var + TINY) - tf.math.log(new_prob_var + TINY)),
axis=ndims - 1
)
def kl(self, old_dist_info, new_dist_info):
"""
Compute the KL divergence of two categorical distributions
"""
old_prob = old_dist_info["prob"]
new_prob = new_dist_info["prob"]
return np.sum(
old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
axis=-1
)
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
old_prob_var = old_dist_info_vars["prob"]
new_prob_var = new_dist_info_vars["prob"]
ndims = old_prob_var.get_shape().ndims
x_var = tf.cast(x_var, tf.float32)
# Assume layout is N * A
return (tf.reduce_sum(input_tensor=new_prob_var * x_var, axis=ndims - 1) + TINY) / \
(tf.reduce_sum(input_tensor=old_prob_var * x_var, axis=ndims - 1) + TINY)
def entropy_sym(self, dist_info_vars):
probs = dist_info_vars["prob"]
return -tf.reduce_sum(input_tensor=probs * tf.math.log(probs + TINY), axis=1)
def cross_entropy_sym(self, old_dist_info_vars, new_dist_info_vars):
old_prob_var = old_dist_info_vars["prob"]
new_prob_var = new_dist_info_vars["prob"]
ndims = old_prob_var.get_shape().ndims
# Assume layout is N * A
return tf.reduce_sum(
input_tensor=old_prob_var * (- tf.math.log(new_prob_var + TINY)),
axis=ndims - 1
)
def entropy(self, info):
probs = info["prob"]
return -np.sum(probs * np.log(probs + TINY), axis=1)
def log_likelihood_sym(self, x_var, dist_info_vars):
probs = dist_info_vars["prob"]
ndims = probs.get_shape().ndims
return tf.math.log(tf.reduce_sum(input_tensor=probs * tf.cast(x_var, tf.float32), axis=ndims - 1) + TINY)
def log_likelihood(self, xs, dist_info):
probs = dist_info["prob"]
# Assume layout is N * A
return np.log(np.sum(probs * xs, axis=-1) + TINY)
@property
def dist_info_specs(self):
return [("prob", (self.dim,))]
def sample(self, dist_info):
return self._f_sample(dist_info["prob"])
def sample_sym(self, dist_info):
probs = dist_info["prob"]
samples = tf.random.categorical(logits=tf.math.log(probs + 1e-8), num_samples=1)[:, 0]
return tf.nn.embedding_lookup(params=np.eye(self.dim, dtype=np.float32), ids=samples)
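def _kl_numpy_example():
    # Hedged, numpy-only sketch that mirrors the arithmetic in Categorical.kl
    # without needing a TensorFlow session; the probability vectors are
    # arbitrary examples.
    old_prob = np.array([[0.5, 0.5]])
    new_prob = np.array([[0.9, 0.1]])
    # KL(old || new) = 0.5*log(0.5/0.9) + 0.5*log(0.5/0.1) ~= 0.5108
    return np.sum(old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
                  axis=-1)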
| 35.152381
| 113
| 0.626389
|
c055d7d0ae6d32d0d7c8e101112542af5f44a91c
| 5,677
|
py
|
Python
|
python/adversary_result.py
|
tudelft-cda-lab/FATE
|
5efaa0255322c2346a78081743fbccc3159f21ed
|
[
"MIT"
] | null | null | null |
python/adversary_result.py
|
tudelft-cda-lab/FATE
|
5efaa0255322c2346a78081743fbccc3159f21ed
|
[
"MIT"
] | null | null | null |
python/adversary_result.py
|
tudelft-cda-lab/FATE
|
5efaa0255322c2346a78081743fbccc3159f21ed
|
[
"MIT"
] | null | null | null |
import math
from os import listdir
from os.path import isfile, join
from typing import List
import matplotlib.pyplot as plt
import numpy as np
from constants import SMALL_PERTURBATION_THRESHOLD
class AdversaryResult:
def __init__(self, filename, fuzzed_features, fuzzed_class, original_features, original_class, check_num,
fuzzer_probs):
self.filename = filename
self.fuzzed_features = fuzzed_features
self.fuzzed_class = fuzzed_class
self.original_features = original_features
self.original_class = original_class
self.check_num = check_num
self.fuzzer_probs = fuzzer_probs
self.valid = True
@property
def key(self):
return f'{self.original_class}->{self.fuzzed_class}'
def diffs(self):
return np.abs(self.fuzzed_features - self.original_features)
@staticmethod
def differences(a, b):
return np.abs(b - a)
def avg_diff(self):
return np.average(self.diffs())
def big_diffs(self):
return [e for e in self.diffs() if e > SMALL_PERTURBATION_THRESHOLD]
def avg_diff_big(self):
return np.average(self.big_diffs())
def dist(self):
return math.dist(self.fuzzed_features, self.original_features)
def l_0_big(self):
return len(self.big_diffs())
def l_0(self):
        # Number of elements that differ from the original vector, i.e. non-zero differences.
return len([e for i, e in enumerate(self.fuzzed_features) if e != self.original_features[i]])
def l_1(self):
        # Sum of absolute differences
return sum(self.diffs())
def l_2(self):
# Euclidean distance
return self.dist()
def l_inf(self):
# The largest (difference) of any element of the vectors
return max(self.diffs().tolist())
def l_inf_other(self, other):
return max(self.differences(self.original_features, other))
def result_for_norm(self, norm):
return eval("self."+norm+"()")
def print_norms(self):
print('l_0: ', self.l_0())
print('l_0_b: ', self.l_0_big())
print('l_1: ', self.l_1())
print('l_2: ', self.l_2())
print('l_inf: ', self.l_inf())
def visualise_image(self):
plt.title(f'Original class is {self.original_class}, adversary class is {self.fuzzed_class}')
plt.imshow(self.pixels(self.fuzzed_features), cmap='gray')
plt.show()
def image_compare(self):
fig = plt.figure()
fig.suptitle(f'Original Class is {self.original_class}, adversary class is {self.fuzzed_class}')
fig.add_subplot(1, 2, 1) # Left
plt.imshow(self.pixels(self.fuzzed_features), cmap='gray')
fig.add_subplot(1, 2, 2) # Right
plt.imshow(self.pixels(self.original_features), cmap='gray')
plt.show()
def investigate(self, actual_model, predicted_probs, fatal=True):
print('ERROR for file: ', self.filename)
print('Generated adv example: ', self.fuzzed_features)
print('Fuzzer probs: ', self.fuzzer_probs)
print('Model probs: ', predicted_probs)
print('Diff: ', self.fuzzer_probs-predicted_probs)
print('Avg diff: ', np.average(self.fuzzer_probs-predicted_probs))
print('Max diff: ', np.max(self.fuzzer_probs-predicted_probs))
err_str = (f"The trained model predicted {actual_model}, but the fuzzer predicts {self.fuzzed_class}. "
f"Skipping adv example.")
if fatal:
raise ValueError(err_str)
print(err_str)
@staticmethod
def pixels(features):
return features.reshape((28, 28))
@staticmethod
def parse_results(adv_path, original_fs: dict, num_features: int) -> List['AdversaryResult']:
"""
original_fs is a dict[int(check_num) -> original_features]
"""
adv_files = [join(adv_path, f) for f in listdir(adv_path) if isfile(join(adv_path, f)) and 'probasmall' not in f]
print(f"{len(adv_files)} adversarial examples were found.")
adversary_results = []
for adv_filename in adv_files:
with open(adv_filename, 'r') as file:
contents = file.read()
try:
# Try block for the case that two processes were writing to the same file
splitted = contents.split(',')
fuzzed_features = np.array([float(e) for e in splitted[:num_features]], dtype='float32')
verify(fuzzed_features) # may raise ValueError
original_class = int(splitted[num_features])
fuzzed_class = int(splitted[num_features+1])
check_num = int(splitted[num_features+2])
original_features = np.array(original_fs[check_num], dtype='float32')
fuzzer_probs = np.array([float(e) for e in splitted[num_features+3:]]) # Fuzzer probabilities
verify(fuzzer_probs) # may raise ValueError
adversary_results.append(
AdversaryResult(adv_filename, fuzzed_features, fuzzed_class, original_features, original_class,
check_num, fuzzer_probs)
)
except (ValueError, IndexError):
continue
if len(adv_files) != len(adversary_results):
print(f'Warning!! {len(adv_files) - len(adversary_results)} files could not be parsed')
return adversary_results
def verify(features):
"""
Verifies that all features are finite i.e. do not contain NaN, inf, -inf values.
"""
if not np.isfinite(features).all():
raise ValueError
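def _norms_example():
    # Hedged illustration of the distance measures used above, computed on a
    # made-up 4-feature perturbation rather than a real adversarial example.
    original = np.array([0.0, 0.2, 0.5, 0.9], dtype='float32')
    fuzzed = np.array([0.0, 0.25, 0.5, 0.4], dtype='float32')
    diffs = np.abs(fuzzed - original)
    l_0 = int(np.count_nonzero(diffs))  # 2 features changed
    l_1 = float(diffs.sum())            # ~0.55
    l_2 = float(np.linalg.norm(diffs))  # ~0.5025
    l_inf = float(diffs.max())          # ~0.5
    return l_0, l_1, l_2, l_inf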
| 34.828221
| 121
| 0.630439
|
c953cd4534084adceff45349a7bab137a46c1ef4
| 45,957
|
py
|
Python
|
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
|
joschu/Theano
|
a53650487647b25f4258deef9d91ec950902c610
|
[
"BSD-3-Clause"
] | 1
|
2020-12-08T02:23:42.000Z
|
2020-12-08T02:23:42.000Z
|
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
|
joschu/Theano
|
a53650487647b25f4258deef9d91ec950902c610
|
[
"BSD-3-Clause"
] | null | null | null |
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
|
joschu/Theano
|
a53650487647b25f4258deef9d91ec950902c610
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for GPU convolution
"""
import sys
import time
import unittest
import traceback
import numpy
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
imported_scipy_convolve2d = False
try:
from scipy.signal import convolve2d
imported_scipy_convolve2d = True
except ImportError:
pass
import theano
from theano import tensor
from theano.gof.python25 import any
from theano.tests.unittest_tools import seed_rng, assert_allclose
# Skip test if cuda is not available.
from theano.sandbox import cuda
if cuda.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
from theano.sandbox.cuda.dnn import GpuDnnConv, GpuDnnConvBase, dnn_conv
#needed as the gpu conv doesn't have a perform implementation.
if theano.config.mode == 'FAST_COMPILE':
theano_mode = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
theano_mode = theano.compile.mode.get_default_mode().including('gpu')
device_id = theano.sandbox.cuda.use.device_number
if device_id is None:
cuda.shared_constructor(numpy.zeros(2, dtype='float32'))
device_id = theano.sandbox.cuda.use.device_number
if device_id is None:
cuda.use("gpu",
force=False,
default_to_move_computation_to_gpu=False,
move_shared_float32_to_gpu=False,
enable_cuda=False,
test_driver=True)
device_id = theano.sandbox.cuda.use.device_number
cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray
device_prop = cuda_ndarray.device_properties(device_id)
def py_conv_valid_numpy(img, kern):
assert img.shape[1] == kern.shape[1]
outshp = (img.shape[0], kern.shape[0],
img.shape[2] - kern.shape[2] + 1,
img.shape[3] - kern.shape[3] + 1)
out = numpy.zeros(outshp, dtype='float32')
for b in xrange(out.shape[0]):
for k in xrange(out.shape[1]):
for rr in xrange(out.shape[2]):
for cc in xrange(out.shape[3]):
#rr, cc is the upper-left corner of img patches
imgpatch = img[b, :, rr:rr + kern.shape[2],
cc:cc + kern.shape[3]]
innerprod = (imgpatch[:, ::-1, ::-1] *
kern[k, :, :, :]).sum()
out[b, k, rr, cc] = innerprod
return out
def py_conv_pad_img(img, pad_h, pad_w):
assert pad_h >= 0 and pad_w >= 0
padded_img = numpy.zeros(
(img.shape[0], img.shape[1],
pad_h * 2 + img.shape[2], pad_w * 2 + img.shape[3]),
dtype=img.dtype)
padded_img[:, :,
pad_h: pad_h + img.shape[2],
pad_w: pad_w + img.shape[3]] = img
return padded_img
def py_conv_full_numpy(img, kern):
# manually pad the img with zeros all around, and then run it
# through py_conv_valid
padded_img = py_conv_pad_img(img, kern.shape[2] - 1, kern.shape[3] - 1)
return py_conv_valid_numpy(padded_img, kern)
def py_conv(img, kern, mode, subsample):
"""
Use the scipy or numpy implementation depending on whether scipy is available.
The scipy version is faster.
"""
if isinstance(mode, int):
mode = (mode, mode)
if isinstance(mode, tuple):
pad_h, pad_w = map(int, mode)
img = py_conv_pad_img(img, pad_h, pad_w)
mode = 'valid'
if imported_scipy_convolve2d:
return py_conv_scipy(img, kern, mode, subsample)
elif mode == 'valid':
return py_conv_valid_numpy(img, kern)[:, :, ::subsample[0],
::subsample[1]]
elif mode == 'full':
return py_conv_full_numpy(img, kern)[:, :, ::subsample[0],
::subsample[1]]
else:
raise Exception("Can't execute this kernel.")
def py_conv_scipy(img, kern, mode, subsample):
assert img.shape[1] == kern.shape[1]
if mode == 'valid':
outshp = (img.shape[0], kern.shape[0],
img.shape[2] - kern.shape[2] + 1,
img.shape[3] - kern.shape[3] + 1)
else:
outshp = (img.shape[0], kern.shape[0],
img.shape[2] + kern.shape[2] - 1,
img.shape[3] + kern.shape[3] - 1)
out = numpy.zeros(outshp, dtype='float32')
for b in xrange(out.shape[0]):
for k in xrange(out.shape[1]):
for s in xrange(img.shape[1]):
#convolve2d or correlate
out[b, k, :, :] += convolve2d(img[b, s, :, :],
kern[k, s, :, :],
mode)
return out[:, :, ::subsample[0], ::subsample[1]]
def _params_allgood_header():
print "ishape kshape #Mflops CPU Mflops GPU Mflops Speedup"
def _params_allgood(ishape, kshape, mode, subsample=(1, 1), img_stride=(1, 1),
kern_stride=(1, 1), version=-1, verbose=0, random=True,
print_=None, id=None, rtol=1e-5, atol=1e-8,
nb_iter=0, ones=False, compile_kshp=None,
theano_mode=None, cls=None):
#
# This function is the core of several of the big unit-test drivers,
# but it can also be used very directly on its own to test a specific
# kind of convolution.
#
# See `test_example` (above) for an example of how to use this directly.
#
# :param kshape: (4d)The shape of the kernel at run time.
# :param compile_kshp: (2d) hardcode the shape of the kernel in
#                      the generated code. This is supposed to be
#                      faster, but we need to check that we raise
#                      an error if the input has the wrong shape.
#
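# A minimal sketch of calling this directly (shapes are hypothetical):
#   _params_allgood((3, 2, 8, 8), (4, 2, 5, 5), 'valid',
#                   subsample=(1, 1), verbose=1, theano_mode=theano_mode)
# This compares the GPU result against the CPU reference and raises on
# any mismatch beyond rtol/atol.
#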
if ones:
assert not random
npy_img = theano._asarray(numpy.ones(ishape), dtype='float32')
npy_kern = -theano._asarray(numpy.ones(kshape), dtype='float32')
elif random:
npy_img = theano._asarray(numpy.random.rand(*ishape) + 1,
dtype='float32')
npy_kern = theano._asarray(numpy.random.rand(*kshape) - 2,
dtype='float32')
else:
npy_img = theano._asarray(numpy.arange(
numpy.prod(ishape)).reshape(ishape), dtype='float32') + 1
npy_kern = -(theano._asarray(numpy.arange(
numpy.prod(kshape)).reshape(kshape), dtype='float32') + 1)
img = cuda_ndarray.CudaNdarray(npy_img)
kern = cuda_ndarray.CudaNdarray(npy_kern)
#we take the stride after the transfer as we make the data
#c_contiguous on the GPU.
if img_stride != (1, 1):
img = img[:, :, ::img_stride[0], ::img_stride[1]]
npy_img = npy_img[:, :, ::img_stride[0], ::img_stride[1]]
if kern_stride != (1, 1):
kern = kern[:, :, ::kern_stride[0], ::kern_stride[1]]
npy_kern = npy_kern[:, :, ::kern_stride[0], ::kern_stride[1]]
t2 = None
t0 = time.time()
cpuval = py_conv(npy_img, npy_kern, mode, subsample)
t1 = time.time()
i = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_img.shape])()
k = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_kern.shape])()
op = theano.sandbox.cuda.blas.GpuConv(border_mode=mode,
subsample=subsample,
version=version,
verbose=verbose,
kshp=compile_kshp)(i, k)
assert all((sh == 1) is br for
           sh, br in zip(cpuval.shape[:2], op.type.broadcastable[:2]))
f = theano.function([i, k], op, mode=theano_mode)
if cls is not None:
assert any([isinstance(node.op, cls)
for node in f.maker.fgraph.toposort()]), "Cannot find class %r in %r" % (cls, f.maker.fgraph.toposort())
gpuval = f(img, kern)
t2 = time.time()
for i in range(nb_iter):
gpuval2 = f(img, kern)
assert (numpy.asarray(gpuval) == numpy.asarray(gpuval2)).all()
gpuval = numpy.asarray(gpuval)
assert gpuval.shape == cpuval.shape, ("shape mismatch", gpuval.shape, cpuval.shape)
assert_allclose(cpuval, gpuval, rtol=rtol, atol=atol)
assert numpy.all(numpy.isfinite(gpuval)), gpuval
if (t2 is not None):
if mode == 'valid':
approx_fp = cpuval.size * ishape[1] * kshape[2] * kshape[3] * 2
else:
approx_fp = (ishape[0] * kshape[0] * kshape[1] * kshape[2] *
kshape[3] * ishape[2] * ishape[3] * 2)
approx_fp /= 1e6
cpu_mflops = approx_fp / (t1 - t0)
gpu_mflops = approx_fp / (t2 - t1)
if verbose > 0:
print >> sys.stdout, '%15s' % str(ishape), '%15s' % str(kshape),
print >> sys.stdout, '%12.5f %7.2f %7.2f %7.1f' % (approx_fp,
cpu_mflops, gpu_mflops, (t1 - t0) / (t2 - t1))
def exec_conv(version, shapes, verbose, random, mode,
print_=None, rtol=1e-5, ones=False,
theano_mode=theano_mode, cls=None):
if verbose > 0:
_params_allgood_header()
for ver in version:
for id, (ishape, kshape, subshape,
istride, kstride) in enumerate(shapes):
yield (_params_allgood, ishape, kshape, mode, subshape,
istride, kstride, ver, verbose, random, print_, id,
rtol, 1e-8, 0, ones, None, theano_mode, cls)
def get_basic_shapes():
#basic test of image and kernel shape
return [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
((1, 1, 2, 2), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
((1, 1, 3, 3), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
#basic test for non-square kernel and image
((1, 1, 2, 4), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
((1, 1, 3, 4), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
((1, 1, 4, 3), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
((1, 1, 4, 4), (1, 1, 3, 2), (1, 1), (1, 1), (1, 1)),
((1, 1, 4, 4), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))]
def get_shapes(imshp=(1, 1), kshp=(1, 1), subsample=(1, 1),
img_stride=(1, 1), kern_stride=(1, 1)):
""" all possible case if we one or more of stack size, batch size,
nkern. We use the gived image shape, kernel shape and subsmaple
shape."""
return [
#stack only
((1, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),
#batch only
((3, 1) + imshp, (1, 1) + kshp, subsample, img_stride, kern_stride),
#nkern only
((1, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),
#batch and nkern
((3, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),
#batch and stack
((3, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),
#stack and nkern
((1, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),
#batch, nkern and stack
((2, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),
#batch, nkern and stack
((3, 2) + imshp, (4, 2) + kshp, subsample, img_stride, kern_stride)
]
def get_shapes2(scales_img=(1, 1), scales_kern=(1, 1), subsample=(1, 1),
img_stride=(1, 1), kern_stride=(1, 1)):
#basic test of stack, batch and nkern parameters
shapes = get_shapes((1 * scales_img[0], 1 * scales_img[1]),
(1 * scales_kern[0], 1 * scales_kern[1]),
subsample, img_stride, kern_stride)
#basic test of stack, batch and nkern parameters with image and kernel shape
shapes += get_shapes((2 * scales_img[0], 2 * scales_img[1]),
(2 * scales_kern[0], 2 * scales_kern[1]),
subsample, img_stride, kern_stride)
#basic test of stack, batch and nkern parameters with image and kernel shape
shapes += get_shapes((3 * scales_img[0], 3 * scales_img[1]),
(2 * scales_kern[0], 2 * scales_kern[1]),
subsample, img_stride, kern_stride)
#basic test of stack, batch and nkern parameters with non-square image.
shapes += get_shapes((4 * scales_img[0], 3 * scales_img[1]),
(2 * scales_kern[0], 2 * scales_kern[1]),
subsample, img_stride, kern_stride)
#basic test of stack, batch and nkern parameters with non-square image.
shapes += get_shapes((3 * scales_img[0], 4 * scales_img[1]),
(2 * scales_kern[0], 2 * scales_kern[1]),
subsample, img_stride, kern_stride)
#basic test of stack, batch and nkern parameters with non-square kernel.
shapes += get_shapes((4 * scales_img[0], 4 * scales_img[1]),
(3 * scales_kern[0], 2 * scales_kern[1]),
subsample, img_stride, kern_stride)
#basic test of stack, batch and nkern parameters with non-square kernel.
shapes += get_shapes((4 * scales_img[0], 4 * scales_img[1]),
(2 * scales_kern[0], 3 * scales_kern[1]),
subsample, img_stride, kern_stride)
return shapes
def get_valid_shapes():
# img shape, kern shape, subsample shape
shapes = get_basic_shapes()
shapes += get_shapes2()
#test image stride
shapes += get_shapes2(scales_img=(2, 2), img_stride=(1, 2))
shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 1))
shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
shapes += get_shapes2(scales_img=(2, 2), img_stride=(-1, -1))
shapes += get_shapes2(scales_img=(2, 2), kern_stride=(-1, -1))
#test subsample done in a separate fct
shapes += [
#other test
((2, 1, 2, 2), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1))
, ((3, 2, 4, 4), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1))
, ((4, 1, 10, 10), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1))
, ((1, 1, 4, 4), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))
, ((4, 1, 10, 10), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))
, ((4, 1, 10, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1))
, ((4, 1, 20, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1))
, ((3, 2, 8, 8), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize
, ((3, 2, 8, 6), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize, non-square image
, ((3, 2, 8, 6), (4, 2, 4, 3), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize, non-square image, non-square kern
, ((3, 2, 8, 6), (4, 2, 4, 6), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize ,non-square image, non-square kern, kernsize==imgsize on one dim
, ((16, 5, 64, 64), (8, 5, 8, 8), (1, 1), (1, 1), (1, 1)) # a big one
, ((16, 1, 28, 28), (20, 1, 5, 5), (1, 1), (1, 1), (1, 1)) # MNIST LeNET layer 1
, ((20, 16, 32, 32), (1, 16, 28, 28), (1, 1), (1, 1), (1, 1)) # layer 1 backprop to weights
, ((60,20,28,28), (10,20,5,5), (1, 1), (2,2), (1, 1))#added a test case that fails, from test_nnet.py.test_conv_nnet2
, ((10,5,28,28), (10,5,5,5), (1, 1), (2,2), (1, 1))#same as the previous case but reduced, which still triggers the error
#Test more than maxThreadsDim0
, ((2,4,13,1050), (3,4,10, 11), (1, 1), (1, 1), (1, 1))
, ((2,4,1050,13), (3,4,10, 11), (1, 1), (1, 1), (1, 1))
]
shapes += [ ((60,1,28,28),(20,1,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 1 layers
, ((60,20,12,12),(30,20,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 2 layers
, ((60,30,8,8),(20,30,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 1 full
, ((20,60,12,12),(30,60,8,8), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 2 valid
# , ((1,60,28,28),(20,60,24,24), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 2 valid
, ((10,1,64,64),(20,1,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 1 layers
, ((10,20,29,29),(30,20,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 2 layers
, ((10,30,23,23),(20,30,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 full
# , ((20,10,29,29),(30,10,23,23), (1, 1), (1, 1), (1, 1))#test_lenet_64 bprop 1
# , ((1,10,64,64),(20,10,58,58), (1, 1), (1, 1), (1, 1))#test_lenet_64 bprop 2
]
return shapes
def test_valid_0_2():
seed_rng()
shapes = get_valid_shapes()
version = [0, 2]
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes2 = []
for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
numpy.asarray(kshape[2:]) +
numpy.asarray([1, 1]))
if oshape[3] > device_prop['maxThreadsDim0']:
continue
if ishape[1] > 1:
continue
if ((numpy.prod(ishape[2:]) + numpy.prod(kshape[2:])) * 4 >
(16 * 1024 - 150)):
continue
if subshape == (1, 1):
shapes2.append((ishape, kshape, subshape, istride, kstride))
shapes = shapes2
for t in exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5):
yield t
def test_valid_1_3_11_12():
seed_rng()
shapes = get_valid_shapes()
version = [1, 3, 11, 12]
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes2 = []
for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
numpy.asarray(kshape[2:]) +
numpy.asarray([1, 1]))
if oshape[3] > device_prop['maxThreadsDim0']:
continue
if ((numpy.prod(ishape[2:]) + numpy.prod(kshape[2:])) * 4 >
(16 * 1024 - 150)):
continue
if subshape == (1, 1):
shapes2.append((ishape, kshape, subshape, istride, kstride))
shapes = shapes2
for t in exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5):
yield t
def test_valid_4():
seed_rng()
shapes = get_valid_shapes()
version = [4]
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes2 = []
for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
numpy.asarray(kshape[2:]) +
numpy.asarray([1, 1]))
if oshape[3] > device_prop['maxThreadsDim0']:
continue
if ishape[1] > 1:
continue
if ((kshape[2] * ishape[3] * 4 + numpy.prod(kshape[2:]) * 4) >
(16 * 1024 - 150)):
continue
if subshape == (1, 1):
shapes2.append((ishape, kshape, subshape, istride, kstride))
shapes = shapes2
for t in exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5):
yield t
def test_valid_5():
seed_rng()
shapes = get_valid_shapes()
version = [5]
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes2 = []
for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
numpy.asarray(kshape[2:]) +
numpy.asarray([1, 1]))
if oshape[3] > device_prop['maxThreadsDim0']:
continue
if ((kshape[2] * ishape[3] * 4 + numpy.prod(kshape[2:]) * 4) >
(16 * 1024 - 150)):
continue
if subshape == (1, 1):
shapes2.append((ishape, kshape, subshape, istride, kstride))
shapes = shapes2
for t in exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5):
yield t
def test_valid_7_8_13():
seed_rng()
shapes = get_valid_shapes()
# This is to test the "new" lower shared memory usage.
shapes.append(((10, 30, 60, 60), (20, 30, 40, 40),
(1, 1), (1, 1), (1, 1)))
version = [7, 8, 13]
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes2 = []
for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
numpy.asarray(kshape[2:]) +
numpy.asarray([1, 1]))
if oshape[2] * oshape[3] > device_prop['maxThreadsDim0']:
continue
if max(numpy.prod(ishape[2:]) * 4 + 2 * kshape[3] * 4,
oshape[2] * oshape[3] * 4 * 2) > (16 * 1024 - 150):
continue
if subshape == (1, 1):
shapes2.append((ishape, kshape, subshape, istride, kstride))
shapes = shapes2
for t in exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5):
yield t
def test_valid_9_10():
seed_rng()
shapes = get_valid_shapes()
version = [9, 10]
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes2 = []
for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
oshape = [ishape[0]] + [kshape[0]] + list(numpy.asarray(ishape[2:]) -
numpy.asarray(kshape[2:]) +
numpy.asarray([1, 1]))
if oshape[3] > device_prop['maxThreadsDim0']:
continue
if (kshape[3] * 4 + ishape[3]) > (16 * 1024 - 150):
continue
if subshape == (1, 1):
shapes2.append((ishape, kshape, subshape, istride, kstride))
shapes = shapes2
for t in exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5):
yield t
def _test_valid(cls, mode=None, extra_shapes=[], version=[-1]):
seed_rng()
shapes = get_valid_shapes()
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
shapes += extra_shapes
return exec_conv(version, shapes, verbose, random, 'valid',
print_=print_, ones=ones, rtol=1.1e-5,
theano_mode=mode, cls=cls)
def test_valid():
for t in _test_valid(None,
mode=theano_mode,
version=[-2, -1, 6]):
yield t
def test_gemm_valid():
extra_shapes = get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
extra_shapes += get_shapes2(scales_kern=(2, 2), kern_stride=(2, 2))
for t in _test_valid(cuda.blas.BaseGpuCorrMM,
mode=theano_mode.excluding("cudnn"),
extra_shapes=extra_shapes):
yield t
def test_dnn_valid():
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
for t in _test_valid(GpuDnnConvBase, mode=theano_mode.including("cudnn")):
yield t
def test_default_conv():
"""Just test that we introduce the right GPU convolution
version.
"""
img = theano.tensor.ftensor4()
fil = theano.tensor.ftensor4()
c = theano.tensor.nnet.conv2d(img, fil)
f = theano.function([img, fil], c, mode=theano_mode)
if cuda.dnn.dnn_available():
assert any([isinstance(a.op, GpuDnnConv)
for a in f.maker.fgraph.apply_nodes])
else:
assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
for a in f.maker.fgraph.apply_nodes])
mode = theano_mode.excluding('local_conv_dnn', 'local_conv_gemm')
f = theano.function([img, fil], c, mode=mode)
assert any([isinstance(a.op, cuda.blas.GpuConv)
for a in f.maker.fgraph.apply_nodes])
mode = theano_mode.excluding('conv_dnn', 'conv_gemm')
f = theano.function([img, fil], c, mode=mode)
assert any([isinstance(a.op, cuda.blas.GpuConv)
for a in f.maker.fgraph.apply_nodes])
def _test_full(cls, mode=None, version=[-1], extra_shapes=[]):
seed_rng()
shapes = get_basic_shapes()
shapes += get_shapes2()
#test image stride
shapes += get_shapes2(scales_img=(2, 2), img_stride=(1, 2))
shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 1))
shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
shapes += get_shapes2(scales_img=(2, 2), img_stride=(-1, -1))
shapes += get_shapes2(scales_img=(2, 2), kern_stride=(-1, -1))
#test subsample done in a separate fct
shapes += [
#other test
((2, 1, 2, 2), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1))
, ((3, 2, 4, 4), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1))
, ((4, 1, 10, 10), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1))
, ((1, 1, 4, 4), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))
, ((4, 1, 10, 10), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1))
, ((4, 1, 10, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1))
, ((4, 1, 20, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1))
, ((3, 2, 8, 8), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize
, ((3, 2, 8, 6), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize, non-square image
, ((3, 2, 8, 6), (4, 2, 4, 3), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize, non-square image, non-square kern
, ((3, 2, 8, 6), (4, 2, 4, 6), (1, 1), (1, 1), (1, 1)) #stack, nkern, bsize ,non-square image, non-square kern, kernsize==imgsize on one dim
, ((16, 5, 64, 64), (8, 5, 8, 8), (1, 1), (1, 1), (1, 1)) # a big one
, ((16, 1, 28, 28), (20, 1, 5, 5), (1, 1), (1, 1), (1, 1)) # MNIST LeNET layer 1
, ((20, 16, 32, 32), (1, 16, 28, 28), (1, 1), (1, 1), (1, 1)) # layer 1 backprop to weights
#other test
, ((3, 1, 1, 1), (2, 1, 5, 3), (1, 1), (1, 1), (1, 1))#kernel bigger than image
, ((3, 2, 1, 1), (4, 2, 1, 1), (1, 1), (1, 1), (1, 1))
, ((3, 2, 4, 4), (4, 2, 2, 6), (1, 1), (1, 1), (1, 1))
, ((3, 2, 4, 4), (4, 2, 8, 6), (1, 1), (1, 1), (1, 1))#kernel bigger than image
, ((4, 2, 10, 10), (3, 2, 2, 12), (1, 1), (1, 1), (1, 1))
]
shapes += [
# ((60,1,28,28),(20,1,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 1 layers
# , ((60,20,12,12),(30,20,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 2 layers
((60,30,8,8),(20,30,5,5), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 1 full
# , ((20,60,12,12),(30,60,8,8), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 2 valid
# , ((1,60,28,28),(20,60,24,24), (1, 1), (1, 1), (1, 1))#test_lenet_28 bprop 2 valid
# , ((10,1,64,64),(20,1,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 1 layers
# , ((10,20,29,29),(30,20,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 2 layers
, ((10,30,23,23),(20,30,7,7), (1, 1), (1, 1), (1, 1))#test_lenet_64 full
# , ((20,10,29,29),(30,10,23,23), (1, 1), (1, 1), (1, 1))#test_lenet_64 bprop 1
# , ((1,10,64,64),(20,10,58,58), (1, 1), (1, 1), (1, 1))#test_lenet_64 bprop 2
#Test more than maxThreadsDim0
, ((2,4,13,1050), (3,4,10, 11), (1, 1), (1, 1), (1, 1))
, ((2,4,1050,13), (3,4,10, 11), (1, 1), (1, 1), (1, 1))
, ((1,1,44800,1), (6,1,1,1), (1, 1), (1, 1), (1, 1))#This caused crash
]
verbose = 0
random = True
shapes += extra_shapes
return exec_conv(version, shapes, verbose, random, 'full',
theano_mode=mode, cls=cls)
def test_full():
for t in _test_full(None,
mode=theano_mode,
version=[-2, -1, 0, 1, 2, 3, 4, 5]):
yield t
def test_gemm_full():
for t in _test_full(cuda.blas.BaseGpuCorrMM,
mode=theano_mode.excluding("cudnn")):
yield t
def test_dnn_full():
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
for t in _test_full(GpuDnnConvBase, mode=theano_mode.including("cudnn")):
yield t
def _test_subsample(cls, mode, version_valid=[-1], version_full=[-1]):
seed_rng()
shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1))
]
shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))
# We only include the versions that implement subsampling to make the
# test faster.
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
for t in exec_conv(version_valid, shapes, verbose, random, 'valid',
print_=print_, ones=ones,
theano_mode=mode, cls=cls):
yield t
for t in exec_conv(version_full, shapes, verbose, random, 'full',
print_=print_, ones=ones,
theano_mode=mode, cls=cls):
yield t
def test_subsample():
for t in _test_subsample(None, theano_mode,
version_valid=[-2, -1, 1, 3, 11, 12],
version_full=[-2, -1]):
yield t
def test_gemm_subsample():
for t in _test_subsample(cuda.blas.BaseGpuCorrMM,
theano_mode.excluding("cudnn")):
yield t
def test_dnn_subsample():
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
for t in _test_subsample(GpuDnnConvBase, theano_mode.including('cudnn')):
yield t
class TestConv2DGPU(unittest.TestCase):
conv_ops = (cuda.blas.GpuConv,
cuda.dnn.GpuDnnConvBase,
cuda.blas.BaseGpuCorrMM)
def test_logical_shapes(self):
seed_rng()
for stride in range(1, 4):
kshp = (10, 2, 10, 10)
featshp = (3, 10, 11, 11)
a = tensor.ftensor4()
A = tensor.ftensor4()
# Need to transpose first two dimensions of kernel, and reverse
# index kernel image dims (for correlation)
kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])
featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
featshp[3] * stride)
kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
#print featshp, kshp_rotated, featshp_logical[1:], kshp[2:]
image_estimate = tensor.nnet.conv2d(a, kernel_rotated,
border_mode='full',
image_shape=featshp,
filter_shape=kshp_rotated,
imshp_logical=featshp_logical[1:],
kshp_logical=kshp[2:])
func = theano.function([a, A], image_estimate, mode=theano_mode)
#theano.printing.debugprint(func,)
assert any([isinstance(node.op, self.conv_ops)
for node in func.maker.fgraph.toposort()])
a_in = numpy.random.randn(*featshp).astype("float32")
A_in = numpy.random.randn(*kshp).astype("float32")
func(a_in, A_in)
def test_invalid_input_shape(self):
"""
Tests that we raise an error when the shape given at build time
differs from the shape at run time.
"""
seed_rng()
verbose = 0
random = True
print_ = False
ones = False
if ones:
random = False
global theano_mode
theano_mode_orig = theano_mode
try:
if theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
theano_mode = theano.compile.mode.get_mode(
'FAST_RUN').including('gpu')
for mode in ['valid', 'full']:
for shapes in [((3, 2, 8, 8), (4, 2, 5, 5), (8, 8)),
((3, 2, 8, 8), (4, 2, 5, 5), (5, 8)),
#((3, 2, 8, 8), (4, 2, 5, 5), (8, 5)),
# We use only the number of columns.
]:
self.assertRaises(ValueError, _params_allgood,
shapes[0], shapes[1],
verbose=verbose, random=random,
mode=mode,
print_=print_, ones=ones,
compile_kshp=shapes[2])
finally:
theano_mode = theano_mode_orig
class TestConvWithPadding(object):
"""test conv ops that support arbitrary padding via border_mode
note that in order to make the yield work, we can not subclass from
unittest.TestCase
"""
@staticmethod
def gemm_conv_op(img, kern, border_mode):
kern = theano.sandbox.cuda.basic_ops.gpu_contiguous(
kern[:, :, ::-1, ::-1])
y = theano.sandbox.cuda.blas.GpuCorrMM(border_mode=border_mode)(
img, kern)
return y
conv_ops = []
@classmethod
def setup_class(cls):
cls.conv_ops.append(cls.gemm_conv_op)
if cuda.dnn.dnn_available():
cls.conv_ops.append(cuda.dnn.dnn_conv)
def test_invalid_arg(self):
img = theano._asarray(numpy.empty((1, 1, 1, 1)), dtype='float32')
kern = theano._asarray(numpy.empty((1, 1, 1, 1)), dtype='float32')
for i in self.conv_ops:
assert_raises(ValueError, i, img, kern,
border_mode=(-1, 0))
assert_raises(ValueError, i, img, kern,
border_mode=(0, -1))
assert_raises(ValueError, i, img, kern,
border_mode='not border')
def _run_onecase(self, img_shape, kern_shape, padding, op):
npy_img = numpy.random.rand(*img_shape).astype('float32')
npy_kern = numpy.random.rand(*kern_shape).astype('float32')
img = theano._asarray(npy_img, dtype='float32')
kern = theano.shared(npy_kern)
border_mode = padding
cpuval = py_conv(npy_img, npy_kern, border_mode, (1, 1))
X = tensor.ftensor4()
Y = op(X, kern, border_mode=border_mode)
func = theano.function([X], Y, mode=theano_mode)
gpuval = numpy.asarray(func(img))
assert_allclose(cpuval, gpuval, rtol=1e-5, atol=1e-5)
def test_numeric_value(self):
params = [
((5, 10, 4, 4), (12, 10, 4, 4), (2, 1)),
((5, 10, 8, 8), (12, 10, 4, 4), 3),
((5, 10, 6, 8), (12, 10, 3, 4), 'full'),
((5, 10, 9, 6), (12, 10, 9, 4), 'valid')
]
for img_shape, kern_shape, padding in params:
for op in self.conv_ops:
yield self._run_onecase, img_shape, kern_shape, padding, op
def gemm_directly(bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsx, subsy,
direction):
ishape = (bs, ch, rImg1, rImg2)
kshape = (nf, ch, rFlt1, rFlt2)
subsample = (subsx, subsy)
npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')
if direction == 'fprop':
i = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_img.shape])()
k = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_kern.shape])()
cpuval = py_conv(npy_img, npy_kern, 'valid', subsample)
op = theano.sandbox.cuda.blas.GpuCorrMM(border_mode='valid',
subsample=subsample)(i, k)
f = theano.function([i, k], op, mode=theano_mode)
gpuval = f(npy_img, npy_kern[:,:,::-1,::-1])
elif direction == 'bprop img':
i = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in
npy_kern.transpose(1, 0, 2, 3).shape])()
k = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_img.shape])()
cpuval = py_conv(npy_img, npy_kern, 'full', subsample)
op = theano.sandbox.cuda.blas.GpuCorrMM_gradInputs(
border_mode='valid', subsample=subsample)(i, k)
f = theano.function([i, k], op, mode=theano_mode)
gpuval = f(npy_kern.transpose(1, 0, 2, 3), npy_img)
elif direction == 'bprop kern':
i = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in
npy_img.transpose(1, 0, 2, 3).shape])()
k = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in
npy_kern.transpose(1, 0, 2, 3).shape])()
cpuval = py_conv(npy_img, npy_kern, 'valid', subsample)
op = theano.sandbox.cuda.blas.GpuCorrMM_gradWeights(
border_mode='valid', subsample=subsample)(i, k)
f = theano.function([i, k], op, mode=theano_mode)
gpuval = numpy.array(f(
npy_img.transpose(1, 0, 2, 3),
npy_kern.transpose(1, 0, 2, 3)[:,:,::-1,::-1])).transpose(
1, 0, 2, 3)
assert_allclose(cpuval, gpuval, rtol=1e-4)
def test_gemm_directly():
for bs in range(1, 5):
for ch in range(1,4):
for nf in range(1,4):
for rImg1 in range(5, 9):
for rImg2 in range(5, 9):
for rFlt1 in range(2, 4):
for rFlt2 in range(2, 4):
for direction in ['bprop img', 'bprop kern']:
yield (gemm_directly, bs, ch, nf, rImg1,
rImg2, rFlt1, rFlt2, 1, 1,
direction)
for subsx in range(1, 3):
for subsy in range(1, 3):
yield (gemm_directly, bs, ch, nf,
rImg1, rImg2, rFlt1, rFlt2,
subsx, subsy, 'fprop')
def gemm_op(mode, subsample):
return theano.sandbox.cuda.blas.GpuCorrMM(mode, subsample)
def dnn_op(mode, subsample):
def f(img, kern):
return dnn_conv(img, kern, border_mode=mode, conv_mode='cross',
subsample=subsample)
return f
def conv_grad(mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsample, op):
ishape = (bs, ch, rImg1, rImg2)
kshape = (nf, ch, rFlt1, rFlt2)
npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')
i = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_img.shape])()
k = cuda.CudaNdarrayType(
broadcastable=[sh == 1 for sh in npy_kern.shape])()
# TODO: also test custom pad values
corr_op = op(mode, subsample)(i, k)
# try to compile reference implementation without shape,
# so we don't have to compile hundreds of versions
conv_op = tensor.nnet.conv2d(i, k[:,:,::-1,::-1],
border_mode=mode, subsample=subsample)
try:
conv_op_di = theano.grad(conv_op.sum(), i)
conv_op_dk = theano.grad(conv_op.sum(), k)
except Exception:
# compile with shape information only when needed
conv_op = tensor.nnet.conv2d(i, k[:,:,::-1,::-1],
ishape, kshape, mode, subsample)
conv_op_di = theano.grad(conv_op.sum(), i)
conv_op_dk = theano.grad(conv_op.sum(), k)
corr_op_di = theano.grad(corr_op.sum(), i)
corr_op_dk = theano.grad(corr_op.sum(), k)
outputs = [corr_op, conv_op,
corr_op_di, conv_op_di,
corr_op_dk, conv_op_dk]
try:
conv_op_dik = theano.grad(conv_op_di.sum(), k)
conv_op_dki = theano.grad(conv_op_dk.sum(), i)
corr_op_dik = theano.grad(corr_op_di.sum(), k)
corr_op_dki = theano.grad(corr_op_dk.sum(), i)
outputs.extend([corr_op_dik, conv_op_dik,
corr_op_dki, conv_op_dki])
except Exception:
# skip if the reference implementation can't do it
pass
f = theano.function([i, k], outputs, mode=theano_mode.excluding('conv_dnn', 'conv_gemm'))
allvals = f(npy_img, npy_kern)
for a, b, oa, ob, p in zip(allvals[::2], allvals[1::2],
outputs[::2], outputs[1::2],
('top', 'dtop/dbottom', 'dtop/dweight',
'dtop/dbottom/dweight', 'dtop/dweight/dbottom')):
assert oa.type.broadcastable[:2] == ob.type.broadcastable[:2]
assert_allclose(a, b, rtol=1e-4)
def test_conv_grads():
if cuda.device_properties(cuda.active_device_number())['major'] < 3:
ops = [gemm_op]
else:
ops = [gemm_op, dnn_op]
for mode in 'valid', 'full':
for bs in [1, 5]:
for ch in [4]:
for nf in [3]:
for rImg1 in [2, 5]:
for rImg2 in [2, 8]:
for rFlt1 in [1, 2]:
for rFlt2 in [1, 2]:
for subsample in (1, 1), (1, 2), (2, 2):
for op in ops:
yield (conv_grad, mode, bs, ch, nf,
rImg1, rImg2, rFlt1, rFlt2,
subsample, op)
def benchmark():
shapes_valid = [
#test_lenet_28 shape
((20, 60,12,12), (30,60,8,8), (1, 1), (1, 1), (1, 1))#valid
,((60, 20,12,12), (30,20,5,5), (1, 1), (1, 1), (1, 1))#valid
,((60, 1,28,28), (20,1,5,5), (1, 1), (1, 1), (1, 1))#valid
,((1, 60,28,28), (20,60,24,24), (1, 1), (1, 1), (1, 1))#valid
#test_lenet_32 shape
,((20, 60,14,14), (30,60,10,10), (1, 1), (1, 1), (1, 1))#valid
,((60, 20,14,14), (30,20,5,5), (1, 1), (1, 1), (1, 1))#valid
,((60, 1,32,32), (20,1,5,5), (1, 1), (1, 1), (1, 1))#valid
,((1, 60,32,32), (20,60,28,28), (1, 1), (1, 1), (1, 1))#valid
#test_lenet_64 shape
,((10, 20,29,29), (30,20,7,7), (1, 1), (1, 1), (1, 1))#valid
,((20, 10,29,29), (30,10,23,23), (1, 1), (1, 1), (1, 1))#valid
,((10, 1,64,64), (20,1,7,7), (1, 1), (1, 1), (1, 1))#valid
,((1, 10,64,64), (20,10,58,58), (1, 1), (1, 1), (1, 1))#valid
#test_lenet_108 shape
,((10, 20,51,51), (30,20,7,7), (1, 1), (1, 1), (1, 1))#valid
,((20, 10,51,51), (30,10,45,45), (1, 1), (1, 1), (1, 1))#valid
,((10, 1,108,108), (20,1,7,7), (1, 1), (1, 1), (1, 1))#valid
,((1, 10,108,108), (20,10,102,102), (1, 1), (1, 1), (1, 1))#valid
#test_lenet_256 shape
,((2, 20,124,124), (30,20,9,9), (1, 1), (1, 1), (1, 1))#valid
,((20, 2,124,124), (30,2,116,116), (1, 1), (1, 1), (1, 1))#valid
,((2, 1,256,256), (20,1,9,9), (1, 1), (1, 1), (1, 1))#valid
,((1, 2,256,256), (20,2,248,248), (1, 1), (1, 1), (1, 1))#valid
]
shapes_full = [
#test_lenet_28 shape
((60, 30,8,8), (20, 30, 5, 5), (1, 1), (1, 1), (1, 1))#full
#test_lenet_32 shape
,((60, 30,10,10), (20, 30, 5, 5), (1, 1), (1, 1), (1, 1))#full conv_full_patch_stack_padded' N=1
#test_lenet_64 shape
,((10, 30,23,23), (20, 30, 7, 7), (1, 1), (1, 1), (1, 1))#full conv_full_patch_stack_padded' N=3
#test_lenet_108 shape
,((10, 30,45,45), (20, 30, 7, 7), (1, 1), (1, 1), (1, 1))#full 'conv_full_patch_stack_padded' N=9
#test_lenet_256 shape
,((2, 30,116,116), (20, 30, 9,9), (1, 1), (1, 1), (1, 1))#full conv_reference_full
]
version = [-1]
verbose = 1
random = True
for t in exec_conv(version, shapes_valid, verbose, random, 'valid',
print_=None, rtol=1e-3):
t[0](*t[1:])
for t in exec_conv(version, shapes_full, verbose, random, 'full'):
t[0](*t[1:])
def test_stack_rows_segfault_070312():
seed_rng()
# 07/03/2012
# Running this unittest with cuda-memcheck exposes an illegal read.
# THEANO_FLAGS=device=gpu cuda-memcheck nosetests \
# test_conv_cuda_ndarray.py:test_stack_rows_segfault_070312
img = theano.shared(numpy.random.rand(1, 80, 96, 96).astype('float32'))
kern = theano.shared(numpy.random.rand(1, 80, 9, 9).astype('float32'))
out = theano.shared(numpy.random.rand(1, 2, 2, 3).astype('float32'))
op = theano.tensor.nnet.conv.ConvOp(imshp=(80, 96, 96), kshp=(9, 9),
nkern=1, bsize=1)
f = theano.function([], [], updates=[(out, op(img, kern))], mode=theano_mode)
f()
| 40.778172
| 152
| 0.514807
|
577acc0633e20a6cbdd528346e0c6e8b95c28ba7
| 3,675
|
py
|
Python
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py
|
zajaczajac/metaworld
|
4febbc4f702c3145b73b012b58b111b2c439032a
|
[
"MIT"
] | null | null | null |
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py
|
zajaczajac/metaworld
|
4febbc4f702c3145b73b012b58b111b2c439032a
|
[
"MIT"
] | null | null | null |
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py
|
zajaczajac/metaworld
|
4febbc4f702c3145b73b012b58b111b2c439032a
|
[
"MIT"
] | null | null | null |
import numpy as np
from gym.spaces import Box
from metaworld.envs.env_util import get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerPlateSlideEnvV2(SawyerXYZEnv):
def __init__(self):
goal_low = (-0.1, 0.85, 0.)
goal_high = (0.1, 0.9, 0.)
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (0., 0.6, 0.)
obj_high = (0., 0.6, 0.)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': 0.3,
'obj_init_pos': np.array([0., 0.6, 0.], dtype=np.float32),
'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),
}
self.goal = np.array([0., 0.85, 0.02])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.max_path_length = 150
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_plate_slide.xml', True)
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {
'reachDist': reachDist,
'goalDist': pullDist,
'epRew': reward,
'pickRew': None,
'success': float(pullDist <= 0.08)
}
return ob, reward, False, info
def _get_pos_objects(self):
return self.data.get_geom_xpos('puck')
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
self.obj_init_pos = self.init_config['obj_init_pos']
self._target_pos = self.goal.copy()
if self.random_init:
rand_vec = self._get_state_rand_vec()
self.obj_init_pos = rand_vec[:3]
self._target_pos = rand_vec[3:]
self.sim.model.body_pos[
self.model.body_name2id('puck_goal')] = self._target_pos
self._set_obj_xyz(np.zeros(2))
self.objHeight = self.data.get_geom_xpos('puck')[2]
self.maxDist = np.linalg.norm(
self.obj_init_pos[:-1] - self._target_pos[:-1])
self.target_reward = 1000 * self.maxDist + 1000 * 2
return self._get_obs()
def _reset_hand(self):
super()._reset_hand()
def compute_reward(self, actions, obs):
del actions
objPos = obs[3:6]
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
pullGoal = self._target_pos
reachDist = np.linalg.norm(objPos - fingerCOM)
pullDist = np.linalg.norm(objPos[:-1] - pullGoal[:-1])
c1 = 1000
c2 = 0.01
c3 = 0.001
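# Reward shaping (as implemented below): once the gripper is within 5 cm
# of the puck, reward progress toward the goal plus exponential bonuses
# that grow as pullDist approaches zero; otherwise only penalize reachDist.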
if reachDist < 0.05:
pullRew = 1000*(self.maxDist - pullDist) + c1*(np.exp(-(pullDist**2)/c2) + np.exp(-(pullDist**2)/c3))
pullRew = max(pullRew, 0)
else:
pullRew = 0
reward = -reachDist + pullRew
return [reward, reachDist, pullDist]
| 30.122951
| 113
| 0.586667
|
c35b0677859ae183eb6f3a9616beaab222650cc0
| 2,133
|
py
|
Python
|
src/python/pants/testutil/_process_handler_test.py
|
rcuza/pants
|
0429258b181986eed856ae45af93b776727774a0
|
[
"Apache-2.0"
] | 1
|
2021-02-22T18:11:26.000Z
|
2021-02-22T18:11:26.000Z
|
src/python/pants/testutil/_process_handler_test.py
|
rcuza/pants
|
0429258b181986eed856ae45af93b776727774a0
|
[
"Apache-2.0"
] | 13
|
2022-02-18T22:52:57.000Z
|
2022-03-30T10:11:29.000Z
|
src/python/pants/testutil/_process_handler_test.py
|
rcuza/pants
|
0429258b181986eed856ae45af93b776727774a0
|
[
"Apache-2.0"
] | 4
|
2021-06-18T09:11:27.000Z
|
2021-09-30T08:38:43.000Z
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import subprocess
import unittest
from textwrap import dedent
from pants.testutil._process_handler import SubprocessProcessHandler
class TestSubprocessProcessHandler(unittest.TestCase):
def test_exit_1(self):
process = subprocess.Popen(["/bin/sh", "-c", "exit 1"])
process_handler = SubprocessProcessHandler(process)
self.assertEqual(process_handler.wait(), 1)
def test_exit_0(self):
process = subprocess.Popen(["/bin/sh", "-c", "exit 0"])
process_handler = SubprocessProcessHandler(process)
self.assertEqual(process_handler.wait(), 0)
def test_communicate_teeing_retrieves_stdout_and_stderr(self):
process = subprocess.Popen(
[
"/bin/bash",
"-c",
"""
echo "1out"
echo >&2 "1err"
sleep 0.05
echo >&2 "2err"
echo "2out"
sleep 0.05
echo "3out"
sleep 0.05
echo >&2 "3err"
sleep 0.05
exit 1
""",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
process_handler = SubprocessProcessHandler(process)
self.assertEqual(
process_handler.communicate_teeing_stdout_and_stderr(),
(
dedent(
"""\
1out
2out
3out
"""
).encode(),
dedent(
"""\
1err
2err
3err
"""
).encode(),
),
)
# Sadly, this test doesn't test that sys.std{out,err} also receive the output.
# You can see it when you run it, but any way we have of spying on sys.std{out,err}
# isn't pickleable enough to write a test which works.
| 31.835821
| 91
| 0.501172
|
582b34d2c2ff3c49052aa03b3a3a99955d03ed65
| 174
|
py
|
Python
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_Seasonal_DayOfMonth_NoAR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_Seasonal_DayOfMonth_NoAR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_Seasonal_DayOfMonth_NoAR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['NoAR'] );
| 43.5
| 96
| 0.770115
|
de61fb96d25e6063181f93e6787a287f9c662d97
| 727
|
py
|
Python
|
src/collectors/vmsdoms/test/testvmsdoms.py
|
matt-ullmer/Diamond
|
6ea198f3ebe58473467c6dc38b20e683c278192c
|
[
"MIT"
] | 2
|
2015-03-13T05:46:54.000Z
|
2015-11-05T15:54:28.000Z
|
src/collectors/vmsdoms/test/testvmsdoms.py
|
matt-ullmer/Diamond
|
6ea198f3ebe58473467c6dc38b20e683c278192c
|
[
"MIT"
] | 1
|
2019-01-31T01:00:14.000Z
|
2019-01-31T01:00:14.000Z
|
src/collectors/vmsdoms/test/testvmsdoms.py
|
matt-ullmer/Diamond
|
6ea198f3ebe58473467c6dc38b20e683c278192c
|
[
"MIT"
] | 2
|
2019-01-30T23:51:03.000Z
|
2020-02-04T19:27:26.000Z
|
#!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from vmsdoms import VMSDomsCollector
###############################################################################
class TestVMSDomsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('VMSDomsCollector', {
})
self.collector = VMSDomsCollector(config, None)
def test_import(self):
self.assertTrue(VMSDomsCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
| 27.961538
| 79
| 0.488308
|
0df940917fc5f4e31434f2888bd1a6954617079e
| 18,500
|
py
|
Python
|
python/deps/untypy/untypy/impl/protocol.py
|
Panteon32Om/write-your-python-program
|
615b73113466c1c8f901fad0a076b480f62ad437
|
[
"BSD-3-Clause"
] | null | null | null |
python/deps/untypy/untypy/impl/protocol.py
|
Panteon32Om/write-your-python-program
|
615b73113466c1c8f901fad0a076b480f62ad437
|
[
"BSD-3-Clause"
] | null | null | null |
python/deps/untypy/untypy/impl/protocol.py
|
Panteon32Om/write-your-python-program
|
615b73113466c1c8f901fad0a076b480f62ad437
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import sys
import typing
from typing import Protocol, Any, Optional, Callable, Union, TypeVar, Dict, Tuple
from untypy.error import UntypyTypeError, UntypyAttributeError, Frame, Location, ResponsibilityType
from untypy.impl.any import SelfChecker, AnyChecker
from untypy.interfaces import TypeCheckerFactory, CreationContext, TypeChecker, ExecutionContext, \
WrappedFunctionContextProvider
from untypy.util import WrappedFunction, ArgumentExecutionContext, ReturnExecutionContext
from untypy.util.condition import FunctionCondition
from untypy.util.typehints import get_type_hints
class ProtocolFactory(TypeCheckerFactory):
def create_from(self, annotation: Any, ctx: CreationContext) -> Optional[TypeChecker]:
if isinstance(annotation, type) and Protocol in annotation.mro():
return ProtocolChecker(annotation, ctx)
elif hasattr(annotation, '__args__') and hasattr(annotation.__origin__,
'__mro__') and typing.Protocol in annotation.__origin__.__mro__:
return ProtocolChecker(annotation, ctx)
else:
return None
def _find_bound_typevars(clas: type) -> (type, Dict[TypeVar, Any]):
if not hasattr(clas, '__args__') or not hasattr(clas, '__origin__'):
return (clas, dict())
if not hasattr(clas.__origin__, '__parameters__'):
return (clas, dict())
keys = clas.__origin__.__parameters__
values = clas.__args__
if len(keys) != len(values):
raise UntypyAttributeError(f"Some unbound Parameters in {clas.__name__}. "
f"keys={keys} do not match values={values}.",
[Location(
file=inspect.getfile(clas),
line_no=inspect.getsourcelines(clas)[1],
line_span=len(inspect.getsourcelines(clas)[0]))])
return (clas.__origin__, dict(zip(keys, values)))
def get_proto_members(proto: type, ctx: CreationContext) -> dict[
str, Tuple[inspect.Signature, dict[str, TypeChecker], FunctionCondition]]:
blacklist = ['__init__', '__class__', '__delattr__', '__dict__', '__dir__',
'__doc__', '__getattribute__', '__getattr__', '__init_subclass__',
'__new__', '__setattr__', '__subclasshook__', '__weakref__',
'__abstractmethods__', '__class_getitem__']
member_dict = {}
for [name, member] in inspect.getmembers(proto):
if name in blacklist:
continue
if inspect.isfunction(member):
member = WrappedFunction.find_original(member)
signature = inspect.signature(member)
is_typed = len(inspect.getfullargspec(member).annotations) != 0
checkers = {}
if not is_typed:
# Untyped member: fall back to Any for every argument and the return value
for key in signature.parameters:
if key == 'self':
checkers[key] = SelfChecker()
else:
checkers[key] = AnyChecker()
checkers['return'] = AnyChecker()
else:
annotations = get_type_hints(member, ctx)
for key in signature.parameters:
if key == 'self':
checkers[key] = SelfChecker()
else:
param = signature.parameters[key]
if param.annotation is inspect.Parameter.empty:
raise ctx.wrap(UntypyAttributeError(
f"Missing annotation for argument '{key}' of function {member.__name__} "
f"in protocol {proto.__name__}\n"))
param_anot = annotations[key]
if param_anot is proto:
checker = SimpleInstanceOfChecker(proto, None)
else:
checker = ctx.find_checker(param_anot)
if checker is None:
raise ctx.wrap(UntypyAttributeError(f"\n\tUnsupported type annotation: {param.annotation}\n"
f"for argument '{key}' of function {member.__name__} "
f"in protocol {proto.__name__}.\n"))
checkers[key] = checker
if signature.return_annotation is inspect.Parameter.empty:
return_annotation = None
else:
return_annotation = annotations['return']
if return_annotation is proto: # Self as return type would lead to endless recursion
return_checker = SimpleInstanceOfChecker(proto, None)
else:
return_checker = ctx.find_checker(return_annotation)
if return_checker is None:
raise ctx.wrap(UntypyAttributeError(f"\n\tUnsupported type annotation: {signature.return_annotation}\n"
f"for return value of function {member.__name__} "
f"in protocol-like {proto.__name__}.\n"))
checkers['return'] = return_checker
fc = None
if hasattr(member, '__fc'):
fc = getattr(member, '__fc')
member_dict[name] = (signature, checkers, fc)
return member_dict
class ProtocolChecker(TypeChecker):
def __init__(self, annotation: type, ctx: CreationContext, *, altname : Optional[str] = None):
(proto, typevars) = _find_bound_typevars(annotation)
self.ctx = ctx.with_typevars(typevars)
self.proto = proto
self._members = None
self.typevars = typevars
self.wrapper_types = dict()
self.altname = altname
@property
def members(self):
if not self._members:
self._members = get_proto_members(self.proto, self.ctx)
return self._members
def may_change_identity(self) -> bool:
return True
def check_and_wrap(self, arg: Any, ctx: ExecutionContext) -> Any:
if hasattr(arg, '_ProtocolWrappedFunction__inner'):
# no double wrapping
arg = getattr(arg, '_ProtocolWrappedFunction__inner')
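# Cache one generated wrapper class per concrete argument type so repeated
# checks of the same type reuse it.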
if type(arg) in self.wrapper_types:
return self.wrapper_types[type(arg)](arg, ctx)
else:
wrapped_type = ProtocolWrapper(self, arg, self.members, ctx)
self.wrapper_types[type(arg)] = wrapped_type
return wrapped_type(arg, ctx)
def base_type(self) -> list[Any]:
# Prevent Classes implementing multiple Protocols in one Union by accident.
return [Protocol]
def describe(self) -> str:
if self.altname is not None:
return self.altname
desc = set([])
for name in self.members:
(sig, binds, cond) = self.members[name]
for argname in sig.parameters:
if isinstance(sig.parameters[argname].annotation, TypeVar):
desc.add(binds[argname].describe())
if isinstance(sig.return_annotation, TypeVar):
desc.add(binds['return'].describe())
if len(desc) > 0:
return f"{self.proto.__name__}[" + (', '.join(desc)) + "]"
else:
return f"{self.proto.__name__}"
def protocol_type(self) -> str:
return f"protocol"
def protoname(self):
return self.describe()
def ProtocolWrapper(protocolchecker: ProtocolChecker, originalValue: Any,
members: dict[str, Tuple[inspect.Signature, dict[str, TypeChecker], FunctionCondition]],
ctx: ExecutionContext):
list_of_attr = dict()
original = type(originalValue)
for fnname in members:
if not hasattr(original, fnname):
raise ctx.wrap(UntypyTypeError(
expected=protocolchecker.describe(),
given=originalValue
)).with_header(
f"{original.__name__} does not meet the requirements of protocol {protocolchecker.proto.__name__}."
).with_note(f"It is missing the function '{fnname}'.")
original_fn = getattr(original, fnname)
try:
# inspect.signature fails on built-ins; fall back to None in that case
original_fn_signature = inspect.signature(original_fn)
except:
original_fn_signature = None
if hasattr(original_fn, '__wf'):
original_fn = getattr(original_fn, '__wf')
(sig, argdict, fc) = members[fnname]
for param in sig.parameters:
if original_fn_signature is not None and param not in original_fn_signature.parameters:
raise ctx.wrap(UntypyTypeError(
expected=protocolchecker.describe(),
given=originalValue
)).with_header(
f"{original.__name__} does not meet the requirements of protocol {protocolchecker.proto.__name__}."
).with_note(f"The signature of '{fnname}' does not match. Missing required parameter {param}.")
list_of_attr[fnname] = ProtocolWrappedFunction(original_fn, sig, argdict, protocolchecker, fc).build()
def constructor(me, inner, ctx):
me._ProtocolWrappedFunction__inner = inner
me._ProtocolWrappedFunction__ctx = ctx
def __getattr__(me, name):
return getattr(me._ProtocolWrappedFunction__inner, name)
def __setattr__(me, name, value):
if name == '_ProtocolWrappedFunction__inner':
super(type(me), me).__setattr__('_ProtocolWrappedFunction__inner', value)
return
if name == '_ProtocolWrappedFunction__ctx':
super(type(me), me).__setattr__('_ProtocolWrappedFunction__ctx', value)
return
return setattr(me._ProtocolWrappedFunction__inner, name, value)
list_of_attr['__init__'] = constructor
list_of_attr['__getattr__'] = __getattr__ # allow access of attributes
list_of_attr['__setattr__'] = __setattr__ # allow access of attributes
name = f"{protocolchecker.proto.__name__}For{original.__name__}"
return type(name, (), list_of_attr)
class ProtocolWrappedFunction(WrappedFunction):
def __init__(self, inner: Union[Callable, WrappedFunction], signature: inspect.Signature,
checker: Dict[str, TypeChecker],
protocol: ProtocolChecker,
fc: FunctionCondition):
self.inner = inner
self.signature = signature
self.checker = checker
self.protocol = protocol
self.fc = fc
def build(self):
fn = WrappedFunction.find_original(self.inner)
fn_of_protocol = getattr(self.protocol.proto, fn.__name__)
if hasattr(fn_of_protocol, '__wf'):
fn_of_protocol = getattr(fn_of_protocol, '__wf')
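# The wrapper below checks arguments twice: first against the protocol's
# declared signature, then (when the wrapped callable is itself a
# WrappedFunction) against the implementation's own annotations.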
def wrapper(me, *args, **kwargs):
inner_object = me.__inner
inner_ctx = me.__ctx
caller = sys._getframe(1)
(args, kwargs, bind1) = self.wrap_arguments(lambda n: ArgumentExecutionContext(fn_of_protocol, caller, n),
(inner_object, *args), kwargs)
if isinstance(self.inner, WrappedFunction):
(args, kwargs, bind2) = self.inner.wrap_arguments(lambda n:
ProtocolArgumentExecutionContext(self, n,
inner_object,
inner_ctx),
args, kwargs)
ret = fn(*args, **kwargs)
if isinstance(self.inner, WrappedFunction):
ret = self.inner.wrap_return(ret, bind2, ProtocolReturnExecutionContext(self,
ResponsibilityType.IN,
inner_object,
inner_ctx))
return self.wrap_return(ret, bind1, ProtocolReturnExecutionContext(self,
ResponsibilityType.OUT, inner_object,
inner_ctx))
async def async_wrapper(*args, **kwargs):
raise AssertionError("Not correctly implemented see wrapper")
if inspect.iscoroutine(self.inner):
w = async_wrapper
else:
w = wrapper
setattr(w, '__wrapped__', fn)
setattr(w, '__name__', fn.__name__)
setattr(w, '__signature__', self.signature)
setattr(w, '__wf', self)
return w
def get_original(self):
return self.inner
def wrap_arguments(self, ctxprv: WrappedFunctionContextProvider, args, kwargs):
try:
bindings = self.signature.bind(*args, **kwargs)
except TypeError as e:
err = UntypyTypeError(header=str(e))
if "self" not in self.signature.parameters:
err = err.with_note("Hint: 'self'-parameter was omitted in declaration.")
raise ctxprv("").wrap(err)
bindings.apply_defaults()
if self.fc is not None:
self.fc.prehook(bindings, ctxprv)
for name in bindings.arguments:
check = self.checker[name]
ctx = ctxprv(name)
bindings.arguments[name] = check.check_and_wrap(bindings.arguments[name], ctx)
return bindings.args, bindings.kwargs, bindings
def wrap_return(self, ret, bindings, ctx: ExecutionContext):
check = self.checker['return']
if self.fc is not None:
self.fc.posthook(ret, bindings, ctx)
return check.check_and_wrap(ret, ctx)
def describe(self) -> str:
fn = WrappedFunction.find_original(self.inner)
return f"{fn.__name__}" + str(self.signature)
def checker_for(self, name: str) -> TypeChecker:
return self.checker[name]
def declared(self) -> Location:
fn = WrappedFunction.find_original(self.inner)
return WrappedFunction.find_location(getattr(self.protocol.proto, fn.__name__))
class ProtocolReturnExecutionContext(ExecutionContext):
def __init__(self, wf: ProtocolWrappedFunction, invert: ResponsibilityType, me: Any, ctx: ExecutionContext):
self.wf = wf
self.invert = invert
self.me = me
self.ctx = ctx
def wrap(self, err: UntypyTypeError) -> UntypyTypeError:
err = ReturnExecutionContext(self.wf).wrap(err)
if err.responsibility_type is self.invert:
return err
responsable = WrappedFunction.find_location(self.wf)
(decl, ind) = err.next_type_and_indicator()
err = err.with_inverted_responsibility_type()
err = err.with_frame(Frame(
decl,
ind,
declared=self.wf.declared(),
responsable=responsable
))
inner = self.wf.inner
if isinstance(inner, WrappedFunction):
err = err.with_note(
f"The return value of method '{WrappedFunction.find_original(self.wf).__name__}' does violate the {self.wf.protocol.protocol_type()} '{self.wf.protocol.proto.__name__}'.")
err = err.with_note(
f"The annotation '{inner.checker_for('return').describe()}' is incompatible with the {self.wf.protocol.protocol_type()}'s annotation '{self.wf.checker_for('return').describe()}'\nwhen checking against the following value:")
previous_chain = UntypyTypeError(
self.me,
f"{self.wf.protocol.protoname()}"
).with_header(
f"{type(self.me).__name__} does not implement {self.wf.protocol.protocol_type()} {self.wf.protocol.protoname()} correctly.")
previous_chain = self.ctx.wrap(previous_chain)
return err.with_previous_chain(previous_chain)
class ProtocolArgumentExecutionContext(ExecutionContext):
def __init__(self, wf: ProtocolWrappedFunction, arg_name: str, me: Any, ctx: ExecutionContext):
self.wf = wf
self.arg_name = arg_name
self.me = me
self.ctx = ctx
def wrap(self, err: UntypyTypeError) -> UntypyTypeError:
(original_expected, _ind) = err.next_type_and_indicator()
err = ArgumentExecutionContext(self.wf, None, self.arg_name).wrap(err)
responsable = WrappedFunction.find_location(self.wf)
(decl, ind) = err.next_type_and_indicator()
err = err.with_frame(Frame(
decl,
ind,
declared=self.wf.declared(),
responsable=responsable
))
err = err.with_note(
f"Argument {self.arg_name} of method {WrappedFunction.find_original(self.wf).__name__} violates the type declared by the {self.wf.protocol.protocol_type()} {self.wf.protocol.proto.__name__}.")
err = err.with_note(
f"Annotation {original_expected} is incompatible with the {self.wf.protocol.protocol_type()}'s annotation {self.wf.checker_for(self.arg_name).describe()}.")
previous_chain = UntypyTypeError(
self.me,
f"{self.wf.protocol.protoname()}"
).with_header(
f"{type(self.me).__name__} does not implement {self.wf.protocol.protocol_type()} {self.wf.protocol.protoname()} correctly.")
previous_chain = self.ctx.wrap(previous_chain)
# err = err.with_inverted_responsibility_type()
return err.with_previous_chain(previous_chain)
class SimpleInstanceOfChecker(TypeChecker):
def __init__(self, annotation: type, ctx: CreationContext):
self.annotation = annotation
def check_and_wrap(self, arg: Any, ctx: ExecutionContext) -> Any:
if isinstance(arg, self.annotation):
return arg
else:
raise ctx.wrap(UntypyTypeError(arg, self.describe()))
def describe(self) -> str:
return self.annotation.__name__
def base_type(self) -> Any:
return [self.annotation]
| 43.529412
| 239
| 0.587243
|
aa91e0de0cdccc573de3011699089dea43b28031
| 740
|
py
|
Python
|
email_test.py
|
adambreznicky/smudge_python
|
af7ba221890253ac6fe7f38691b351861f8b3d96
|
[
"MIT"
] | 1
|
2017-05-24T02:05:20.000Z
|
2017-05-24T02:05:20.000Z
|
email_test.py
|
adambreznicky/smudge_python
|
af7ba221890253ac6fe7f38691b351861f8b3d96
|
[
"MIT"
] | null | null | null |
email_test.py
|
adambreznicky/smudge_python
|
af7ba221890253ac6fe7f38691b351861f8b3d96
|
[
"MIT"
] | null | null | null |
__file__ = 'email_test'
__date__ = '8/1/2014'
__author__ = 'ABREZNIC'
import smtplib, base64
#
# FROM = 'adam.breznicky@txdot.gov'
# TO = ['tom.neville@txdot.gov'] #must be a list
# SUBJECT = "Testing sending using python"
# TEXT = "tom, holler at me if you received this email"
# message = """\From: %s\nTo: %s\nSubject: %s\n\n%s
# """ % (FROM, ", ".join(TO), SUBJECT, TEXT)
# username = "adam.breznicky@txdot.gov"
# password = base64.b64decode("U2F0dXJkYXkxMjM=")
# server = smtplib.SMTP('owa.txdot.gov', 25)
# server.ehlo()
# server.starttls()
# server.ehlo()
# server.login(username, password)
# server.sendmail(FROM, TO, message)
# server.close()
# print base64.b64encode("Sunday123")
print base64.b64decode("U3VuZGF5MTIz")
| 33.636364
| 56
| 0.678378
|
9fafadca6ca846cb7c3f2094f38b4bd9cc5c7c4d
| 29,464
|
py
|
Python
|
discreteMarkovChain/markovChain.py
|
gvanderheide/discreteMarkovChain
|
8325ffdb791c109eee600684ee0dc9126ce80700
|
[
"MIT"
] | 43
|
2016-07-17T23:54:38.000Z
|
2022-01-06T02:59:06.000Z
|
discreteMarkovChain/markovChain.py
|
gvanderheide/discreteMarkovChain
|
8325ffdb791c109eee600684ee0dc9126ce80700
|
[
"MIT"
] | 9
|
2016-04-01T11:52:02.000Z
|
2017-07-11T08:54:01.000Z
|
discreteMarkovChain/markovChain.py
|
gvanderheide/discreteMarkovChain
|
8325ffdb791c109eee600684ee0dc9126ce80700
|
[
"MIT"
] | 11
|
2016-04-14T09:08:18.000Z
|
2021-05-24T08:41:31.000Z
|
"""
Possible fixes:
-Check that the state codes do not suffer from integer overflow.
-Improve memory usage.
"""
from __future__ import print_function
import numpy as np
from scipy.sparse import coo_matrix,csr_matrix, csgraph, eye, vstack, isspmatrix, isspmatrix_csr
from scipy.sparse.linalg import eigs, gmres, spsolve, inv
from numpy.linalg import norm
from collections import OrderedDict,defaultdict
from scipy.sparse import dok_matrix
try: #For python 3 functionality.
from itertools import imap
except ImportError:
imap = map
class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
    The Markov chain can be defined in continuous or discrete time, and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
    Implementing this function is similar in difficulty to constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
        Should be provided in the subclass by the user when using the indirect/direct method.
statespace()
        Returns the state space of the Markov chain. Should be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
        For the indirect method, it is also allowed to return a dictionary where the keys are states (ints or tuples) and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
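        #Worked examples (illustrative inputs, not from the original code) of the conversions above:
        #  checkInitialState(3)                 -> 3
        #  checkInitialState([1, 2])            -> (1, 2)
        #  checkInitialState(np.array([5]))     -> 5
        #  checkInitialState({(0, 0), (1, 0)})  -> the set itself, unchanged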
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
        The resulting number is unique because we use a numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
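        #Worked example (illustrative values only): if every state component lies
        #between 0 and 2, then largestRange == 3 and statecode == [1, 3, 9], so the
        #state (2, 1, 0) gets code 2*1 + 1*3 + 0*9 == 5, i.e. the state vector is
        #read as a base-3 number. This is why the codes are unique.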
def getStateIndex(self,state):
"""
        Returns the index of a state by calculating the state code and searching for this code in a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
        We calculate P = I + Q/l, with l slightly larger than the largest row sum of Q (the largest total outflow rate).
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
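    #Worked example (illustrative rates only): for Q = [[0, 1], [2, 0]] we get
    #rowSums = [1, 2] and l ~= 2, so P is approximately [[0.5, 0.5], [1.0, 0.0]]
    #with a tiny extra probability (~1e-5) on the second diagonal entry; every
    #row sums to one.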
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
        #Gets the transition matrix and asserts that it consists of a single irreducible (communicating) class.
        P = self.getTransitionMatrix(probabilities=probabilities)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
        The power method is robust even when the state space becomes large (more than 500,000 states), whereas the other methods may have issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
        Markov chains with one or two states are handled analytically, without calling :func:`eigs`.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
"""
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
        We obtain ``pi`` by using the :func:`gmres` solver for the system of linear equations.
        It searches a Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
        Prints all states and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
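#---------------------------------------------------------------------------
#Illustrative usage sketch (not part of the original module): a small
#birth-death chain defined via the indirect method described in the class
#docstring above. The class name and the rates below are assumptions chosen
#purely for demonstration.
class exampleBirthDeathChain(markovChain):
    def __init__(self, maxState=4, birthRate=1.0, deathRate=2.0):
        super(exampleBirthDeathChain, self).__init__(direct=False)
        self.initialState = 0
        self.maxState = maxState
        self.birthRate = birthRate
        self.deathRate = deathRate
    def transition(self, state):
        #For the indirect method a dict {newstate: rate} is allowed, see checkTransitionType().
        rates = {}
        if state < self.maxState:
            rates[state + 1] = self.birthRate
        if state > 0:
            rates[state - 1] = self.deathRate
        return rates
#Typical use:
#  mc = exampleBirthDeathChain()
#  mc.computePi('linear')
#  mc.printPi()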
class finiteMarkovChain(markovChain):
def __init__(self,P=None):
super(finiteMarkovChain,self).__init__(P)
def absorbTime(self):
P = self.getTransitionMatrix(probabilities=True)
components,labels = csgraph.connected_components(P, directed=True, connection='strong',return_labels=True)
if components == 1:
print("no absorbing states")
return
transientStates = np.ones(P.shape[0],dtype=bool)
for component in range(components):
indices = np.where(labels==component)[0]
n = len(indices)
if n==1:
probSum = P[indices,indices].sum()
else:
probSum = P[np.ix_(indices,indices)].sum()
if np.isclose(probSum,n):
transientStates[indices] = False
indices = np.where(transientStates)[0]
n = len(indices)
if n==1:
Q = P[indices,indices]
else:
Q = P[np.ix_(indices,indices)]
#N will be dense
N = inv(eye(n)-Q).A
N2 = N*(2*N[np.arange(n),np.arange(n)]-np.eye(n))-np.power(N,2)
t = np.zeros(P.shape[0])
t[indices] = np.sum(N,axis=1)
for index in indices:
print( self.mapping[index],t[index] )
| 45.894081
| 213
| 0.598561
|
eead98d2269e447193283b721097383b56019f3f
| 2,531
|
py
|
Python
|
folderutils.py
|
stephbu/photo_organizer
|
e9fb52f0538e34baad9bdba9f38dcd43f45e6f3b
|
[
"MIT"
] | 5
|
2019-11-16T09:49:03.000Z
|
2021-01-30T19:31:22.000Z
|
folderutils.py
|
stephbu/photo_organizer
|
e9fb52f0538e34baad9bdba9f38dcd43f45e6f3b
|
[
"MIT"
] | null | null | null |
folderutils.py
|
stephbu/photo_organizer
|
e9fb52f0538e34baad9bdba9f38dcd43f45e6f3b
|
[
"MIT"
] | null | null | null |
# folderutils.py - (c)2015 stephbu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Functions to assist in folder enumeration and naming"""
__author__ = "stephbu"
import glob
import os
from datetime import datetime
def generate_folder(source_date):
"""
Generate partial folder name based on provided source_date
:param source_date: datetime
"""
assert isinstance(source_date, datetime)
date_part = source_date.date()
year = date_part.year
month = date_part.month
day = date_part.day
path = "{0:04d}/{1:02d}-{2:02d}".format(year, month, day)
return path
def either(c):
return '[%s%s]'%(c.lower(),c.upper()) if c.isalpha() else c
def enumerate_files(source_folder, extension):
"""Iterator to enumerate through source_folder and all subfolders looking for files with specified extension"""
for root, dirs, files in os.walk(source_folder):
# support for single and multiple extensions
if isinstance(extension, str):
extension = (extension,)
for ext in extension:
for filename in glob.iglob(os.path.join(root, "*." + ''.join(either(char) for char in ext))):
yield root, filename
def ensure_dir(filename):
"""Ensure directory of specified filename exists and is a directory, or is created"""
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise IOError("path is file")
| 32.87013
| 115
| 0.716318
|
21042d9f986e447a680b1bb6f61f87be7385de4c
| 6,549
|
py
|
Python
|
pyglet/media/codecs/__init__.py
|
pro-roy/pyglet
|
c06d5f95a029fc6a9a3fbe14e41f9c568f00b49f
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/media/codecs/__init__.py
|
pro-roy/pyglet
|
c06d5f95a029fc6a9a3fbe14e41f9c568f00b49f
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/media/codecs/__init__.py
|
pro-roy/pyglet
|
c06d5f95a029fc6a9a3fbe14e41f9c568f00b49f
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from .base import *
import os.path
import pyglet
_debug = pyglet.options['debug_media']
_decoders = [] # List of registered MediaDecoders
_encoders = [] # List of registered MediaEncoders
_decoder_extensions = {} # Map str -> list of matching MediaDecoders
_encoder_extensions = {} # Map str -> list of matching MediaEncoders
class MediaDecoder:
def get_file_extensions(self):
"""Return a list or tuple of accepted file extensions, e.g. ['.wav', '.ogg']
Lower-case only.
"""
return []
def decode(self, file, filename, streaming):
"""Read the given file object and return an instance of `Source`
or `StreamingSource`.
Throws MediaDecodeException if there is an error. `filename`
can be a file type hint.
"""
raise NotImplementedError()
def __hash__(self):
return hash(self.__class__.__name__)
def __eq__(self, other):
return self.__class__.__name__ == other.__class__.__name__
def __repr__(self):
return "{0}{1}".format(self.__class__.__name__, self.get_file_extensions())
class MediaEncoder:
def get_file_extensions(self):
"""Return a list or tuple of accepted file extensions, e.g. ['.wav', '.ogg']
Lower-case only.
"""
return []
def encode(self, source, file, filename):
"""Encode the given source to the given file. `filename`
provides a hint to the file format desired. options are
encoder-specific, and unknown options should be ignored or
issue warnings.
"""
raise NotImplementedError()
def __hash__(self):
return hash(self.__class__.__name__)
def __eq__(self, other):
return self.__class__.__name__ == other.__class__.__name__
def __repr__(self):
return "{0}{1}".format(self.__class__.__name__, self.get_file_extensions())
def get_decoders(filename=None):
"""Get an ordered list of all decoders. If a `filename` is provided,
decoders supporting that extension will be ordered first in the list.
"""
decoders = []
if filename:
extension = os.path.splitext(filename)[1].lower()
decoders += _decoder_extensions.get(extension, [])
decoders += [e for e in _decoders if e not in decoders]
return decoders
def get_encoders(filename=None):
"""Get an ordered list of all encoders. If a `filename` is provided,
encoders supporting that extension will be ordered first in the list.
"""
encoders = []
if filename:
extension = os.path.splitext(filename)[1].lower()
encoders += _encoder_extensions.get(extension, [])
encoders += [e for e in _encoders if e not in encoders]
return encoders
def add_decoders(module):
"""Add a decoder module. The module must define `get_decoders`. Once
added, the appropriate decoders defined in the codec will be returned by
pyglet.media.codecs.get_decoders.
"""
for decoder in module.get_decoders():
if decoder in _decoders:
continue
_decoders.append(decoder)
for extension in decoder.get_file_extensions():
if extension not in _decoder_extensions:
_decoder_extensions[extension] = []
_decoder_extensions[extension].append(decoder)
def add_encoders(module):
"""Add an encoder module. The module must define `get_encoders`. Once
added, the appropriate encoders defined in the codec will be returned by
pyglet.media.codecs.get_encoders.
"""
for encoder in module.get_encoders():
if encoder in _encoders:
continue
_encoders.append(encoder)
for extension in encoder.get_file_extensions():
if extension not in _encoder_extensions:
_encoder_extensions[extension] = []
_encoder_extensions[extension].append(encoder)
def add_default_media_codecs():
# Add all bundled codecs. These should be listed in order of
# preference. This is called automatically by pyglet.media.
try:
from . import wave
add_decoders(wave)
add_encoders(wave)
except ImportError:
pass
try:
if have_ffmpeg():
from . import ffmpeg
add_decoders(ffmpeg)
except ImportError:
pass
def have_ffmpeg():
"""Check if FFmpeg library is available.
Returns:
bool: True if FFmpeg is found.
.. versionadded:: 1.4
"""
try:
from . import ffmpeg_lib
if _debug:
print('FFmpeg available, using to load media files.')
return True
except (ImportError, FileNotFoundError):
if _debug:
print('FFmpeg not available.')
return False
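# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pyglet): the shape of a codec that plugs
# into the registries above. The class name and the '.example' extension are
# assumptions chosen for demonstration only; real codecs live in their own
# modules and are registered by add_default_media_codecs().
class _ExampleDecoder(MediaDecoder):
    def get_file_extensions(self):
        return ['.example']
    def decode(self, file, filename, streaming):
        raise NotImplementedError("demonstration stub only")
# A codec module exposes a module-level ``get_decoders()`` returning decoder
# instances, e.g. ``def get_decoders(): return [_ExampleDecoder()]``. After
# ``add_decoders(that_module)``, ``get_decoders('clip.example')`` lists
# _ExampleDecoder first because its extension matches.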
| 34.468421
| 84
| 0.661322
|
5ca340e1af6e32e35130df210cee8edfaaed7b42
| 2,729
|
py
|
Python
|
geokey/core/middleware.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
geokey/core/middleware.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
geokey/core/middleware.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
"""Core middleware."""
# https://gist.github.com/barrabinfc/426829
from django import http
from django.db import connection
from .signals import request_accessor
try:
from . import settings
XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS
XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS
XS_SHARING_ALLOWED_HEADERS = settings.XS_SHARING_ALLOWED_HEADERS
except:
XS_SHARING_ALLOWED_ORIGINS = '*'
XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'PATCH', 'DELETE']
XS_SHARING_ALLOWED_HEADERS = ['Authorization', 'Content-Type']
class XsSharing(object):
"""
    This middleware enables cross-domain XHR by adding CORS (Access-Control-*) headers to responses and by answering preflight OPTIONS requests, e.g.:
Access-Control-Allow-Origin: http://foo.example
Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE
"""
def process_request(self, request):
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
response = http.HttpResponse()
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
response['Access-Control-Allow-Headers'] = ",".join(XS_SHARING_ALLOWED_HEADERS)
return response
return None
def process_response(self, request, response):
# Avoid unnecessary work
if response.has_header('Access-Control-Allow-Origin'):
return response
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
response['Access-Control-Allow-Headers'] = ",".join(XS_SHARING_ALLOWED_HEADERS)
return response
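    # With the fallback defaults above, the headers added to every response are:
    #   Access-Control-Allow-Origin: *
    #   Access-Control-Allow-Methods: POST,GET,OPTIONS,PUT,PATCH,DELETE
    #   Access-Control-Allow-Headers: Authorization,Content-Type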
class TerminalLogging(object):
def process_response(self, request, response):
from sys import stdout
if stdout.isatty():
for query in connection.queries :
print("\033[1;31m[%s]\033[0m \033[1m%s\033[0m" % (query['time'],
" ".join(query['sql'].split())))
return response
class RequestProvider(object):
def __init__(self):
self._request = None
request_accessor.connect(self)
def process_view(self, request, view_func, view_args, view_kwargs):
self._request = request
return None
def __call__(self, **kwargs):
return self._request
def show_debug_toolbar(request):
"""Custom function to determine whether to show the debug toolbar."""
from django.conf import settings
# Do not include the debug toolbar on Ajax requests
if request.is_ajax():
return False
return bool(settings.DEBUG and settings.DEBUG_TOOLBAR)
| 33.691358
| 91
| 0.689263
|
2e0ff70dbd0d219d640060f85213f03ac0362d73
| 591
|
py
|
Python
|
users/migrations/0005_user_date_joined.py
|
amandasavluchinske/mooc
|
3d83748b2ddda646597b5a3b57f838ed0fb99b3e
|
[
"MIT"
] | null | null | null |
users/migrations/0005_user_date_joined.py
|
amandasavluchinske/mooc
|
3d83748b2ddda646597b5a3b57f838ed0fb99b3e
|
[
"MIT"
] | 2
|
2021-05-07T01:37:50.000Z
|
2022-02-10T10:08:41.000Z
|
users/migrations/0005_user_date_joined.py
|
amandasavluchinske/mooc
|
3d83748b2ddda646597b5a3b57f838ed0fb99b3e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-11 18:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20180410_1115'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_joined',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Data de entrada'),
preserve_default=False,
),
]
| 25.695652
| 125
| 0.65313
|
8a573e1f8fd96b1b247b66c53d3fc93b2d411332
| 758
|
py
|
Python
|
OnsiteContests/2/kangaroo.py
|
koltpython/python-contests
|
2f8f51f39d7809dcadbcd6dd13a658faffcf902e
|
[
"MIT"
] | 1
|
2019-02-27T17:57:30.000Z
|
2019-02-27T17:57:30.000Z
|
OnsiteContests/2/kangaroo.py
|
koltpython/python-contests
|
2f8f51f39d7809dcadbcd6dd13a658faffcf902e
|
[
"MIT"
] | null | null | null |
OnsiteContests/2/kangaroo.py
|
koltpython/python-contests
|
2f8f51f39d7809dcadbcd6dd13a658faffcf902e
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
def kangaroo(x1, v1, x2, v2):
# Think about why we don't have to use elifs
if x1 == x2:
return 'YES'
# We know v1, v2 are positive
if x1 > x2 and v1 >= v2:
return 'NO'
if x1 < x2 and v1 <= v2:
return 'NO'
if (x1 - x2) % (v1 - v2) == 0:
return 'YES'
else:
return 'NO'
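# Why the divisibility test works: after t jumps the kangaroos sit at
# x1 + t*v1 and x2 + t*v2, so they meet iff t = (x2 - x1) / (v1 - v2) is a
# non-negative integer. The sign checks above already rule out v1 == v2 and a
# negative t, leaving only the divisibility check. Example (illustrative
# values): x1=0, v1=3, x2=4, v2=2 gives t = 4, so the answer is 'YES'.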
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
x1V1X2V2 = input().split()
x1 = int(x1V1X2V2[0])
v1 = int(x1V1X2V2[1])
x2 = int(x1V1X2V2[2])
v2 = int(x1V1X2V2[3])
result = kangaroo(x1, v1, x2, v2)
fptr.write(result + '\n')
fptr.close()
| 17.627907
| 48
| 0.55277
|
ec60ac72b484d849c5928952b325ed22e4a0827a
| 2,611
|
py
|
Python
|
setup.py
|
haowang7/adafruit-beaglebone-io-python
|
4cc4b761886c57b1b022d447eee20052c89ef1f4
|
[
"MIT"
] | 1
|
2018-08-06T17:31:30.000Z
|
2018-08-06T17:31:30.000Z
|
setup.py
|
haowang7/adafruit-beaglebone-io-python
|
4cc4b761886c57b1b022d447eee20052c89ef1f4
|
[
"MIT"
] | null | null | null |
setup.py
|
haowang7/adafruit-beaglebone-io-python
|
4cc4b761886c57b1b022d447eee20052c89ef1f4
|
[
"MIT"
] | null | null | null |
try:
from overlays import builder
builder.compile()
builder.copy()
except:
pass
import distribute_setup
import io
import sys
import platform
distribute_setup.use_setuptools()
from setuptools import setup, Extension, find_packages
open_as_utf8 = lambda x: io.open(x, encoding='utf-8')
kernel = platform.release()
if kernel >= '4.1.0':
kernel41 = [('BBBVERSION41', None)]
else:
kernel41 = None
CFLAGS = ['-Wall', '-Werror', '-Wextra', '-Wno-missing-field-initializers', '-Wno-strict-aliasing' ]
classifiers = ['Development Status :: 3 - Alpha',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Home Automation',
'Topic :: System :: Hardware']
extension_args = {
'include_dirs': ['source/include/'],
'extra_compile_args': CFLAGS,
'define_macros': kernel41
}
setup(name = 'Adafruit_BBIO',
version = '1.0.10',
author = 'Justin Cooper',
author_email = 'justin@adafruit.com',
description = 'A module to control BeagleBone IO channels',
long_description = open_as_utf8('README.md').read() + open_as_utf8('CHANGELOG.md').read(),
license = 'MIT',
keywords = 'Adafruit BeagleBone IO GPIO PWM ADC',
url = 'https://github.com/adafruit/adafruit-beaglebone-io-python/',
classifiers = classifiers,
packages = find_packages(),
py_modules = ['Adafruit_I2C'],
ext_modules = [Extension('Adafruit_BBIO.GPIO', ['source/py_gpio.c', 'source/event_gpio.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.PWM', ['source/py_pwm.c', 'source/c_pwm.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.ADC', ['source/py_adc.c', 'source/c_adc.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.SPI', ['source/spimodule.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.UART', ['source/py_uart.c', 'source/c_pinmux.c', 'source/c_uart.c', 'source/constants.c', 'source/common.c'], **extension_args)] )
| 43.516667
| 183
| 0.6036
|
24fb9fbc5a3526d1ba8ca8068933ec2e9bb6805b
| 7,973
|
py
|
Python
|
tlquantum/tests/test_tt_gates.py
|
taylorpatti/quantum
|
67af83844d447d0d17e78cdfb8a15ec271f84f24
|
[
"BSD-3-Clause"
] | null | null | null |
tlquantum/tests/test_tt_gates.py
|
taylorpatti/quantum
|
67af83844d447d0d17e78cdfb8a15ec271f84f24
|
[
"BSD-3-Clause"
] | null | null | null |
tlquantum/tests/test_tt_gates.py
|
taylorpatti/quantum
|
67af83844d447d0d17e78cdfb8a15ec271f84f24
|
[
"BSD-3-Clause"
] | null | null | null |
import tensorly as tl
from tensorly.tt_matrix import TTMatrix
from tensorly.random import random_tt
from tensorly.testing import assert_array_almost_equal
from torch import cos, sin
from opt_einsum import contract
from ..tt_gates import exp_pauli_y, UnaryGatesUnitary, RotY, cnot, cz, so4, BinaryGatesUnitary
from ..tt_operators import identity
from ..tt_contraction import contraction_eq
# Author: Taylor Lee Patti <taylorpatti@g.harvard.edu>
# License: BSD 3 clause
err_tol = 5 #decimals precision
def manual_rotY_unitary(thetas):
nqubits, layer = len(thetas), []
iden, epy = IDENTITY.to(thetas.device), EXP_PAULI_Y.to(thetas.device)
for i in range(nqubits):
layer.append(iden*tl.cos(thetas[i]/2)+epy*tl.sin(thetas[i]/2))
return layer
def test_EXP_PAULI_Y():
exp_pauli_y_temp = TTMatrix([exp_pauli_y()])
assert_array_almost_equal(exp_pauli_y_temp.to_matrix(), tl.tensor([[0., -1],[1, 0]]))
def test_RotY():
rotY = RotY()
RotY_temp = TTMatrix([rotY.forward()])
theta = tl.tensor([rotY.theta])
RotY_dense = tl.tensor([[1,0],[0,1]])*tl.cos(theta/2) + tl.tensor([[0, -1],[1, 0]])*tl.sin(theta/2)
assert_array_almost_equal(RotY_temp.to_matrix(), RotY_dense)
def test_CNOT():
CNOT_temp = TTMatrix([cnot()[0].forward(), cnot()[1].forward()])
CNOT_temp = CNOT_temp.to_matrix()
dense_CNOT = tl.tensor([[1.,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])
assert_array_almost_equal(CNOT_temp, dense_CNOT)
def test_cz_tt():
CZ_temp = TTMatrix([cz()[0].forward(), cz()[1].forward()])
CZ_temp = CZ_temp.to_matrix()
dense_CZ = tl.tensor([[1.,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,-1]])
assert_array_almost_equal(CZ_temp, dense_CZ)
def test_RotYUnitary():
nqubits, ncontraq = 8, 2
unitary = UnaryGatesUnitary(nqubits, ncontraq)
thetas = tl.tensor([theta.data for theta in unitary.parameters()])
layer = TTMatrix(unitary.forward()).to_matrix()
dense_layer = tl.tensor([[1,0],[0,1]])*tl.cos(thetas[0]/2) + tl.tensor([[0, -1],[1, 0]])*tl.sin(thetas[0]/2)
for theta in thetas[1::]:
dense_layer = tl.kron(dense_layer, tl.tensor([[1,0],[0,1]])*tl.cos(theta/2) + tl.tensor([[0, -1],[1, 0]])*tl.sin(theta/2))
assert_array_almost_equal(layer, dense_layer)
    nqubits, ncontraq = 9, 2
unitary = UnaryGatesUnitary(nqubits, ncontraq)
thetas = tl.tensor([theta.data for theta in unitary.parameters()])
layer = TTMatrix(unitary.forward()).to_matrix()
dense_layer = tl.tensor([[1,0],[0,1]])*tl.cos(thetas[0]/2) + tl.tensor([[0, -1],[1, 0]])*tl.sin(thetas[0]/2)
for theta in thetas[1::]:
dense_layer = tl.kron(dense_layer, tl.tensor([[1,0],[0,1]])*tl.cos(theta/2) + tl.tensor([[0, -1],[1, 0]])*tl.sin(theta/2))
assert_array_almost_equal(layer, dense_layer)
def test_q2_gate_layers():
nqubits = 4
nlayers = 2
dims = tuple([2 for i in range(nqubits)])
rank = [1] + [2 for i in range(nqubits-1)] + [1]
state = random_tt(dims, rank=rank)
dense_state = state.to_tensor().reshape(-1,1)
layers = [BinaryGatesUnitary(nqubits, 1, cz(), 0).forward(), BinaryGatesUnitary(nqubits, 1, cz(), 1).forward()]
eq = contraction_eq(nqubits, nlayers)
out = contract(eq, *state, *layers[0], *layers[1], *state)
dense_CZ = tl.tensor([[1.,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,-1]])
dense_layer1 = tl.kron(dense_CZ, dense_CZ)
dense_layer2 = tl.kron(tl.kron(tl.tensor([[1,0],[0,1]]), dense_CZ), tl.tensor([[1,0],[0,0]])) + tl.kron(tl.kron(tl.tensor([[1,0],[0,-1]]), dense_CZ), tl.tensor([[0,0],[0,1]]))
true_out = tl.dot(tl.transpose(dense_state), tl.dot(dense_layer2, tl.dot(dense_layer1, dense_state)))
assert_array_almost_equal(out, true_out[0], decimal=5)
layers = [BinaryGatesUnitary(nqubits, 1, cnot(), 0).forward(), BinaryGatesUnitary(nqubits, 1, cnot(), 1).forward()]
eq = contraction_eq(nqubits, nlayers)
out = contract(eq, *state, *layers[0], *layers[1], *state)
dense_CNOT = tl.tensor([[1.,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])
dense_layer1 = tl.kron(dense_CNOT, dense_CNOT)
dense_layer2 = tl.kron(tl.kron(tl.tensor([[1,0],[0,1]]), dense_CNOT), tl.tensor([[1,0],[0,0]])) + tl.kron(tl.kron(tl.tensor([[0,1],[1,0]]), dense_CNOT), tl.tensor([[0,0],[0,1]]))
true_out = tl.dot(tl.transpose(dense_state), tl.dot(dense_layer2, tl.dot(dense_layer1, dense_state)))
assert_array_almost_equal(out, true_out[0], decimal=err_tol)
nqubits = 5
nlayers = 2
dims = tuple([2 for i in range(nqubits)])
rank = [1] + [2 for i in range(nqubits-1)] + [1]
state = random_tt(dims, rank=rank)
dense_state = state.to_tensor().reshape(-1,1)
layers = [BinaryGatesUnitary(nqubits, 1, cnot(), 0).forward(), BinaryGatesUnitary(nqubits, 1, cnot(), 1).forward()]
eq = contraction_eq(nqubits, nlayers)
out = contract(eq, *state, *layers[0], *layers[1], *state)
dense_CNOT = tl.tensor([[1.,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])
dense_layer1 = tl.kron(tl.kron(dense_CNOT, dense_CNOT), tl.tensor([[1,0],[0,1]]))
dense_layer2 = tl.kron(tl.tensor([[1,0],[0,1]]), tl.kron(dense_CNOT, dense_CNOT))
true_out = tl.dot(tl.transpose(dense_state), tl.dot(dense_layer2, tl.dot(dense_layer1, dense_state)))
assert_array_almost_equal(out, true_out[0], decimal=err_tol)
def test_so4():
so4_01 = so4(1, 0)
theta01 = so4_01[0].theta
true_so4_01 = tl.tensor([[1,0,0,0], [0,1,0,0], [0,0,cos(theta01),-sin(theta01)], [0,0,sin(theta01),cos(theta01)]])
assert_array_almost_equal(TTMatrix([so4_01[0].forward(), so4_01[1].forward()]).to_matrix(), true_so4_01, decimal=err_tol)
so4_12 = so4(1, 2)
theta12 = so4_12[0].theta
true_so4_12 = tl.tensor([[1,0,0,0], [0,cos(theta12),-sin(theta12),0], [0,sin(theta12),cos(theta12),0],[0,0,0,1]])
assert_array_almost_equal(TTMatrix([so4_12[0].forward(), so4_12[1].forward()]).to_matrix(), true_so4_12, decimal=err_tol)
so4_23 = so4(2, 3)
theta23 = so4_23[0].theta
true_so4_23 = tl.tensor([[cos(theta23),-sin(theta23),0,0], [sin(theta23),cos(theta23),0,0], [0,0,1,0], [0,0,0,1]])
assert_array_almost_equal(TTMatrix([so4_23[0].forward(), so4_23[1].forward()]).to_matrix(), true_so4_23, decimal=err_tol)
nqubits, nlayers, ncontraq = 4, 2, 1
unitary0, unitary1 = BinaryGatesUnitary(nqubits, ncontraq, so4_01, 0).forward(), BinaryGatesUnitary(nqubits, ncontraq, so4_01, 1).forward()
dims = tuple([2 for i in range(nqubits)])
rank = [1] + [2 for i in range(nqubits-1)] + [1]
state = random_tt(dims, rank=rank)
dense_state = state.to_tensor().reshape(-1,1)
true_unitary0 = tl.kron(true_so4_01, true_so4_01)
true_unitary1 = tl.kron(tl.kron(tl.tensor([[1,0],[0,1]]), true_so4_01), tl.tensor([[1,0],[0,0]])) + tl.kron(tl.kron(tl.tensor([[cos(theta01),-sin(theta01)], [sin(theta01),cos(theta01)]]), true_so4_01), tl.tensor([[0,0],[0,1]]))
eq = contraction_eq(nqubits, nlayers)
inner_prod = contract(eq, *state, *unitary1, *unitary0, *state)
true_inner_prod = tl.dot(tl.transpose(dense_state), tl.dot(true_unitary0, tl.dot(true_unitary1, dense_state)))
assert_array_almost_equal(inner_prod, true_inner_prod[0], decimal=err_tol)
nqubits = 5
unitary0, unitary1 = BinaryGatesUnitary(nqubits, ncontraq, so4_01, 0).forward(), BinaryGatesUnitary(nqubits, ncontraq, so4_01, 1).forward()
nlayers = 2
dims = tuple([2 for i in range(nqubits)])
rank = [1] + [2 for i in range(nqubits-1)] + [1]
state = random_tt(dims, rank=rank)
dense_state = state.to_tensor().reshape(-1,1)
true_unitary0 = tl.kron(true_so4_01, tl.kron(true_so4_01, tl.eye(2)))
true_unitary1 = tl.kron(tl.eye(2), tl.kron(true_so4_01, true_so4_01))
eq = contraction_eq(nqubits, nlayers)
inner_prod = contract(eq, *state, *unitary1, *unitary0, *state)
true_inner_prod = tl.dot(tl.transpose(dense_state), tl.dot(true_unitary0, tl.dot(true_unitary1, dense_state)))
assert_array_almost_equal(inner_prod, true_inner_prod[0], decimal=err_tol)
| 50.144654
| 231
| 0.666249
|
69a67ce0142aa7b14332d366f8f108ba0e6fe9e4
| 1,378
|
py
|
Python
|
src/arabdic/urls.py
|
Yeagoon/Arabic-Flashcard-Dictionary
|
4454fbc9f25375389d0fa7265ffb9a439d4696dc
|
[
"bzip2-1.0.6"
] | null | null | null |
src/arabdic/urls.py
|
Yeagoon/Arabic-Flashcard-Dictionary
|
4454fbc9f25375389d0fa7265ffb9a439d4696dc
|
[
"bzip2-1.0.6"
] | null | null | null |
src/arabdic/urls.py
|
Yeagoon/Arabic-Flashcard-Dictionary
|
4454fbc9f25375389d0fa7265ffb9a439d4696dc
|
[
"bzip2-1.0.6"
] | null | null | null |
"""arabdic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from contents.views import content_create_view, button_clicked, flashcard_view, button_save, first_flashcard_view, next_flashcard_view, prev_flashcard_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', content_create_view, name="search_page"),
path('output/', button_clicked, name="script"),
path('my flashcards next/', next_flashcard_view, name="flashcards_next"),
path('my flashcards prev/', prev_flashcard_view, name="flashcards_prev"),
path('my flashcard /', first_flashcard_view, name="first_flashcard"),
path('contents/', include('contents.urls'), name="flashcard_page"),
path('save content/', button_save, name="save")
]
| 41.757576
| 155
| 0.727141
|
aacb81e74b56db30c2318aab1297b95e166cc637
| 63,092
|
py
|
Python
|
pypy/interpreter/unicodehelper.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/interpreter/unicodehelper.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/interpreter/unicodehelper.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
import sys
from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rstring import StringBuilder
from rpython.rlib import rutf8
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.rtyper.lltypesystem import rffi
from pypy.module.unicodedata.interp_ucd import unicodedb
from rpython.rlib import runicode
@specialize.memo()
def decode_error_handler(space):
# Fast version of the "strict" errors handler.
def raise_unicode_exception_decode(errors, encoding, msg, s,
startingpos, endingpos):
raise OperationError(space.w_UnicodeDecodeError,
space.newtuple([space.newtext(encoding),
space.newbytes(s),
space.newint(startingpos),
space.newint(endingpos),
space.newtext(msg)]))
return raise_unicode_exception_decode
def decode_never_raise(errors, encoding, msg, s, startingpos, endingpos):
assert startingpos >= 0
assert endingpos >= 0
ux = ['\ux' + hex(ord(x))[2:].upper() for x in s[startingpos:endingpos]]
return ''.join(ux), endingpos, 'b', ''
@specialize.memo()
def encode_error_handler(space):
# Fast version of the "strict" errors handler.
def raise_unicode_exception_encode(errors, encoding, msg, utf8,
startingpos, endingpos):
u_len = rutf8.codepoints_in_utf8(utf8)
raise OperationError(space.w_UnicodeEncodeError,
space.newtuple([space.newtext(encoding),
space.newutf8(utf8, u_len),
space.newint(startingpos),
space.newint(endingpos),
space.newtext(msg)]))
return raise_unicode_exception_encode
def default_error_encode(
errors, encoding, msg, u, startingpos, endingpos):
"""A default handler, for tests"""
assert endingpos >= 0
if errors == 'replace':
return '?', endingpos
if errors == 'ignore':
return '', endingpos
raise ValueError
# ____________________________________________________________
_WIN32 = sys.platform == 'win32'
_MACOSX = sys.platform == 'darwin'
def fsdecode(space, w_string):
from pypy.module._codecs import interp_codecs
state = space.fromcache(interp_codecs.CodecState)
errorhandler=state.decode_error_handler
utf8 = space.bytes_w(w_string)
# fast path for ascii
if rutf8.first_non_ascii_char(utf8) < 0:
return space.newtext(utf8, len(utf8))
if _WIN32:
import pypy.interpreter.unicodehelper_win32 as win32
slen = len(utf8)
utf8, _, lgt = str_decode_utf8(utf8, 'surrogateescape', True, errorhandler)
elif 0 and _MACOSX:
utf8, lgt, pos = str_decode_utf8(utf8, 'surrogateescape', True,
errorhandler, allow_surrogates=False)
elif space.sys.filesystemencoding is None or state.codec_need_encodings:
# bootstrap check: if the filesystemencoding isn't initialized
# or the filesystem codec is implemented in Python we cannot
# use it before the codecs are ready. use the locale codec
# instead
from pypy.module._codecs.locale import (
str_decode_locale_surrogateescape)
utf8, lgt = str_decode_locale_surrogateescape(utf8)
else:
from pypy.module.sys.interp_encoding import getfilesystemencoding
return space.call_method(w_string, 'decode',
getfilesystemencoding(space),
space.newtext('surrogateescape'))
return space.newtext(utf8, lgt)
def fsencode(space, w_uni):
from pypy.module._codecs import interp_codecs
state = space.fromcache(interp_codecs.CodecState)
if _WIN32:
errorhandler=state.encode_error_handler
utf8 = space.utf8_w(w_uni)
bytes = utf8_encode_utf_8(utf8, 'surrogateescape', errorhandler)
elif 0 and _MACOSX:
utf8 = space.utf8_w(w_uni)
errorhandler=state.encode_error_handler,
bytes = utf8_encode_utf_8(utf8, 'surrogateescape', errorhandler,
allow_surrogates=False)
elif space.sys.filesystemencoding is None or state.codec_need_encodings:
# bootstrap check: if the filesystemencoding isn't initialized
# or the filesystem codec is implemented in Python we cannot
# use it before the codecs are ready. use the locale codec
# instead
from pypy.module._codecs.locale import (
utf8_encode_locale_surrogateescape)
utf8 = space.utf8_w(w_uni)
ulen = space.len_w(w_uni)
if '\x00' in utf8:
raise oefmt(space.w_ValueError, "embedded null character")
bytes = utf8_encode_locale_surrogateescape(utf8, ulen)
else:
from pypy.module.sys.interp_encoding import getfilesystemencoding
return space.call_method(w_uni, 'encode',
getfilesystemencoding(space),
space.newtext('surrogateescape'))
return space.newbytes(bytes)
def encode(space, w_data, encoding=None, errors='strict'):
from pypy.objspace.std.unicodeobject import encode_object
return encode_object(space, w_data, encoding, errors)
# These functions take and return unwrapped rpython strings
def decode_raw_unicode_escape(space, string):
return str_decode_raw_unicode_escape(
string, "strict",
final=True, errorhandler=decode_error_handler(space))
def check_ascii_or_raise(space, string):
try:
rutf8.check_ascii(string)
except rutf8.CheckError as e:
decode_error_handler(space)('strict', 'ascii',
'ordinal not in range(128)', string,
e.pos, e.pos + 1)
assert False, "unreachable"
def check_utf8_or_raise(space, string, start=0, end=-1):
# Surrogates are accepted and not treated specially at all.
# If there happen to be two 3-bytes encoding a pair of surrogates,
# you still get two surrogate unicode characters in the result.
# These are the Python3 rules, Python2 differs
assert isinstance(string, str)
try:
return rutf8.check_utf8(string, True, start, end)
except rutf8.CheckError as e:
decode_error_handler(space)('strict', 'utf-8',
'unexpected end of data', string,
e.pos, e.pos + 1)
def str_decode_ascii(s, errors, final, errorhandler):
try:
rutf8.check_ascii(s)
return s, len(s), len(s)
except rutf8.CheckError:
return _str_decode_ascii_slowpath(s, errors, final, errorhandler)
def _str_decode_ascii_slowpath(s, errors, final, errorhandler):
i = 0
res = StringBuilder()
while i < len(s):
ch = s[i]
if ord(ch) > 0x7F:
r, i, rettype, s = errorhandler(errors, 'ascii', 'ordinal not in range(128)',
s, i, i + 1)
res.append(r)
else:
res.append(ch)
i += 1
ress = res.build()
lgt = rutf8.check_utf8(ress, True)
return ress, lgt, lgt
def str_decode_latin_1(s, errors, final, errorhandler):
try:
rutf8.check_ascii(s)
return s, len(s), len(s)
except rutf8.CheckError:
return _str_decode_latin_1_slowpath(s, errors, final, errorhandler)
def _str_decode_latin_1_slowpath(s, errors, final, errorhandler):
res = StringBuilder(len(s))
i = 0
while i < len(s):
if ord(s[i]) > 0x7F:
while i < len(s) and ord(s[i]) > 0x7F:
rutf8.unichr_as_utf8_append(res, ord(s[i]))
i += 1
else:
start = i
end = i + 1
while end < len(s) and ord(s[end]) <= 0x7F:
end += 1
res.append_slice(s, start, end)
i = end
return res.build(), len(s), len(s)
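# Worked example (comment only): the latin-1 byte 0xE9 (LATIN SMALL LETTER E
# WITH ACUTE, U+00E9) is appended by unichr_as_utf8_append() as the two utf-8
# bytes 0xC3 0xA9, while runs of ASCII bytes are copied through unchanged via
# append_slice(); decoding the four bytes 63 61 66 E9 therefore produces five
# output bytes but a reported length of 4 codepoints (latin-1 is one codepoint
# per input byte, hence the "len(s), len(s)" return above).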
class ErrorHandlerError(Exception):
def __init__(self, new, old):
self.new = new
self.old = old
def utf8_encode_utf_8(s, errors, errorhandler, allow_surrogates=False):
if len(s) == 0:
return ''
# two fast paths
if allow_surrogates:
# already valid utf-8 with surrogates, surrogates are allowed, so just
# return
return s
if not rutf8.has_surrogates(s):
# already valid utf-8 and doesn't contain surrogates, so we don't need
# to do anything
return s
# annoying slow path
return _utf8_encode_utf_8_deal_with_surrogates(s, errors, errorhandler)
def _utf8_encode_utf_8_deal_with_surrogates(s, errors, errorhandler):
pos = 0
upos = 0
result = StringBuilder(len(s))
while pos < len(s):
try:
rutf8.check_utf8(s, allow_surrogates=False, start=pos)
# otherwise the fast path above would have triggered
assert pos != 0
result.append_slice(s, pos, len(s))
break
except rutf8.CheckError as e:
end = e.pos
assert end >= 0
result.append_slice(s, pos, end)
upos += rutf8.codepoints_in_utf8(s, start=pos, end=end)
pos = end
# Try to get collect surrogates in one pass
# XXX do we care about performance in this case?
# XXX should this loop for more than one pair?
delta = 1
uchr = rutf8.codepoint_at_pos(s, pos)
if 0xD800 <= uchr <= 0xDBFF:
pos = rutf8.next_codepoint_pos(s, pos)
if pos < len(s):
uchr = rutf8.codepoint_at_pos(s, pos)
if 0xDC00 <= uchr <= 0xDFFF:
delta += 1
res, newindex, rettype, obj = errorhandler(errors, 'utf-8',
'surrogates not allowed', s, upos, upos + delta)
if rettype == 'u':
try:
rutf8.check_ascii(res)
except rutf8.CheckError:
# this is a weird behaviour of CPython, but it's what happens
errorhandler("strict", 'utf-8', 'surrogates not allowed', s, upos, upos + delta)
assert 0, "unreachable"
s = obj
result.append(res)
if newindex <= upos:
raise ErrorHandlerError(newindex, upos)
upos = newindex
pos = rutf8._pos_at_index(s, upos)
return result.build()
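# Illustrative note (comment only): in the loop above a lone high surrogate
# such as U+D800 appears in the (surrogate-tolerant) utf-8 input as the three
# bytes ED A0 80; check_utf8(..., allow_surrogates=False) stops there, the
# errorhandler is consulted for the codepoint range [upos, upos + delta), and
# delta becomes 2 only when a low surrogate (U+DC00..U+DFFF) immediately
# follows, so a surrogate pair is reported to the handler as a single
# two-codepoint error rather than as two separate ones.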
def utf8_encode_latin_1(s, errors, errorhandler, allow_surrogates=False):
try:
rutf8.check_ascii(s)
return s
    except rutf8.CheckError as e:
return _utf8_encode_latin_1_slowpath(s, e.pos, errors, errorhandler)
def _utf8_encode_latin_1_slowpath(s, first_non_ascii_char, errors, errorhandler):
result = StringBuilder(len(s))
result.append_slice(s, 0, first_non_ascii_char)
pos = index = first_non_ascii_char
while pos < len(s):
ch = rutf8.codepoint_at_pos(s, pos)
if ch <= 0xFF:
result.append(chr(ch))
index += 1
pos = rutf8.next_codepoint_pos(s, pos)
else:
startindex = index
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
while pos < len(s) and rutf8.codepoint_at_pos(s, pos) > 0xFF:
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
msg = "ordinal not in range(256)"
res, newindex, rettype, obj = errorhandler(
errors, 'latin1', msg, s, startindex, index)
if rettype == 'u':
for cp in rutf8.Utf8StringIterator(res):
if cp > 0xFF:
errorhandler("strict", 'latin1', msg, s, startindex, index)
raise RuntimeError('error handler should not have returned')
result.append(chr(cp))
else:
for ch in res:
result.append(ch)
s = obj
if index != newindex: # Should be uncommon
index = newindex
pos = rutf8._pos_at_index(s, newindex)
return result.build()
def utf8_encode_ascii(s, errors, errorhandler, allow_surrogates=False):
""" Don't be confused - this is a slowpath for errors e.g. "ignore"
or an obscure errorhandler
"""
result = StringBuilder(len(s))
index = 0
pos = 0
while pos < len(s):
ch = rutf8.codepoint_at_pos(s, pos)
if ch <= 0x7F:
result.append(chr(ch))
index += 1
pos = rutf8.next_codepoint_pos(s, pos)
else:
startindex = index
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
while pos < len(s) and rutf8.codepoint_at_pos(s, pos) > 0x7F:
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
msg = "ordinal not in range(128)"
res, newindex, rettype, obj = errorhandler(
errors, 'ascii', msg, s, startindex, index)
if rettype == 'u':
for cp in rutf8.Utf8StringIterator(res):
                    if cp > 0x7F:
errorhandler("strict", 'ascii', msg, s, startindex, index)
raise RuntimeError('error handler should not have returned')
result.append(chr(cp))
else:
for ch in res:
result.append(ch)
            s = obj
            if index != newindex:  # Should be uncommon
                index = newindex
            pos = rutf8._pos_at_index(s, newindex)
return result.build()
if _WIN32:
import pypy.interpreter.unicodehelper_win32 as win32
def utf8_encode_mbcs(s, errors, errorhandler, allow_surrogates=False):
return win32.utf8_encode_mbcs(s, errors, errorhandler)
def utf8_encode_utf8(s, errors, errorhandler, allow_surrogates=False):
return win32.utf8_encode_utf8(s, errors, errorhandler)
def str_decode_mbcs(s, errors, final, errorhandler):
res, size = win32.str_decode_mbcs(s, errors, errorhandler, final=final)
return res, size, size
def utf8_encode_oem(s, errors, errorhandler, allow_surrogates=False):
res = win32.utf8_encode_oem(s, errors, errorhandler)
return res
def str_decode_oem(s, errors, final, errorhandler):
res, size = win32.str_decode_oem(s, errors, errorhandler, final)
return res, size, size
def utf8_encode_code_page(cp, s, errors, errorhandler, allow_surrogates=False):
res = win32.utf8_encode_code_page(cp, s, errors, errorhandler)
return res
def str_decode_code_page(cp, s, errors, final, errorhandler):
res, size = win32.str_decode_code_page(cp, s, errors, errorhandler, final)
return res, size, size
def str_decode_utf8(s, errors, final, errorhandler, allow_surrogates=False):
try:
# fast version first
return s, rutf8.check_utf8(s, allow_surrogates=allow_surrogates), len(s)
except rutf8.CheckError:
return _str_decode_utf8_slowpath(
s, errors, final, errorhandler, allow_surrogates=allow_surrogates)
def _str_decode_utf8_slowpath(s, errors, final, errorhandler, allow_surrogates):
""" Same as checking for the valid utf8, but we know the utf8 is not
valid so we're trying to either raise or pack stuff with error handler.
The key difference is that this is call_may_force.
In CPython this is done in unicode_decode_utf8, which has no
allow_surrogates. That argument is used in at least decode_utf8sp, in
interpreter.error._compute_value.
"""
if errors is None:
errors = 'strict'
result = StringBuilder(len(s))
pos = 0
while pos < len(s):
ordch1 = ord(s[pos])
# fast path for ASCII
# XXX maybe use a while loop here
if ordch1 <= 0x7F:
pos += 1
result.append(chr(ordch1))
continue
n = ord(runicode._utf8_code_length[ordch1 - 0x80])
if pos + n > len(s):
# argh, this obscure block of code is mostly a copy of
# what follows :-(
charsleft = len(s) - pos - 1 # either 0, 1, 2
# note: when we get the 'unexpected end of data' we need
# to care about the pos returned; it can be lower than len(s),
# in case we need to continue running this loop
if not charsleft:
# there's only the start byte and nothing else
if not final:
break
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'unexpected end of data',
s, pos, pos+1)
result.append(r)
continue
ordch2 = ord(s[pos+1])
if n == 3:
# 3-bytes seq with only a continuation byte
if rutf8._invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates):
# second byte invalid, take the first and continue
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+1)
result.append(r)
continue
else:
# second byte valid, but third byte missing
if not final:
break
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'unexpected end of data',
s, pos, pos+2)
result.append(r)
continue
elif n == 4:
# 4-bytes seq with 1 or 2 continuation bytes
if rutf8._invalid_byte_2_of_4(ordch1, ordch2):
# second byte invalid, take the first and continue
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+1)
result.append(r)
continue
elif charsleft == 2 and rutf8._invalid_byte_3_of_4(ord(s[pos+2])):
# third byte invalid, take the first two and continue
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+2)
result.append(r)
continue
else:
# there's only 1 or 2 valid cb, but the others are missing
if not final:
break
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'unexpected end of data',
s, pos, pos+charsleft+1)
result.append(r)
continue
raise AssertionError("unreachable")
if n == 0:
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid start byte',
s, pos, pos+1)
result.append(r)
elif n == 1:
assert 0, "ascii should have gone through the fast path"
elif n == 2:
ordch2 = ord(s[pos+1])
if rutf8._invalid_byte_2_of_2(ordch2):
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+1)
result.append(r)
continue
# 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz
result.append(chr(ordch1))
result.append(chr(ordch2))
pos += 2
elif n == 3:
ordch2 = ord(s[pos+1])
ordch3 = ord(s[pos+2])
if rutf8._invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates):
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+1)
result.append(r)
continue
elif rutf8._invalid_byte_3_of_3(ordch3):
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+2)
result.append(r)
continue
# 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz
result.append(chr(ordch1))
result.append(chr(ordch2))
result.append(chr(ordch3))
pos += 3
elif n == 4:
ordch2 = ord(s[pos+1])
ordch3 = ord(s[pos+2])
ordch4 = ord(s[pos+3])
if rutf8._invalid_byte_2_of_4(ordch1, ordch2):
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+1)
result.append(r)
continue
elif rutf8._invalid_byte_3_of_4(ordch3):
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+2)
result.append(r)
continue
elif rutf8._invalid_byte_4_of_4(ordch4):
r, pos, rettype, s = errorhandler(errors, 'utf-8',
'invalid continuation byte',
s, pos, pos+3)
result.append(r)
continue
# 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz
result.append(chr(ordch1))
result.append(chr(ordch2))
result.append(chr(ordch3))
result.append(chr(ordch4))
pos += 4
r = result.build()
# XXX can keep track of the resulting length without calling check_utf8
# here
return r, rutf8.check_utf8(r, True), pos
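# Worked example (comment only): for the EURO SIGN U+20AC the input bytes are
# E2 82 AC.  The lead byte 0xE2 yields n == 3 from _utf8_code_length, the two
# continuation bytes pass _invalid_byte_2_of_3 / _invalid_byte_3_of_3, and all
# three bytes are appended verbatim; the decoder only validates, it never
# re-encodes, because the internal string representation is already utf-8.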
hexdigits = "0123456789ABCDEFabcdef"
def hexescape(builder, s, pos, digits,
encoding, errorhandler, message, errors):
chr = 0
if pos + digits > len(s):
endinpos = pos
while endinpos < len(s) and s[endinpos] in hexdigits:
endinpos += 1
r, pos, rettype, s = errorhandler(
errors, encoding, message, s, pos - 2, endinpos)
builder.append(r)
else:
try:
chr = int(s[pos:pos + digits], 16)
except ValueError:
endinpos = pos
while s[endinpos] in hexdigits:
endinpos += 1
r, pos, rettype, s = errorhandler(
errors, encoding, message, s, pos - 2, endinpos)
builder.append(r)
else:
# when we get here, chr is a 32-bit unicode character
try:
builder.append_code(chr)
pos += digits
except rutf8.OutOfRange:
message = "illegal Unicode character"
r, pos, rettype, s = errorhandler(
errors, encoding, message, s, pos - 2, pos + digits)
builder.append(r)
return pos, s
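# Worked example (comment only): while decoding the escape \u00e9 the caller
# has already consumed the backslash and the 'u', so hexescape() is entered
# with digits == 4 and pos pointing at "00e9"; int("00e9", 16) == 0xE9 is
# appended with append_code() and pos advances by 4.  Too few (or non-hex)
# digits are routed through the errorhandler instead, e.g. with the
# "truncated \uXXXX escape" message supplied by the caller.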
def str_decode_unicode_escape(s, errors, final, errorhandler, ud_handler):
if len(s) == 0:
return '', 0, 0, None
builder = rutf8.Utf8StringBuilder(len(s))
pos = 0
first_escape_error_char = None
while pos < len(s):
ch = s[pos]
# Non-escape characters are interpreted as Unicode ordinals
if ch != '\\':
if ord(ch) > 0x7F:
builder.append_code(ord(ch))
else:
builder.append(ch)
pos += 1
continue
# - Escapes
pos += 1
if pos >= len(s):
message = "\\ at end of string"
r, pos, rettype, s = errorhandler(errors, "unicodeescape",
message, s, pos - 1, len(s))
builder.append(r)
continue
ch = s[pos]
pos += 1
# \x escapes
if ch == '\n':
pass
elif ch == '\\':
builder.append_char('\\')
elif ch == '\'':
builder.append_char('\'')
elif ch == '\"':
builder.append_char('\"')
elif ch == 'b':
builder.append_char('\b')
elif ch == 'f':
builder.append_char('\f')
elif ch == 't':
builder.append_char('\t')
elif ch == 'n':
builder.append_char('\n')
elif ch == 'r':
builder.append_char('\r')
elif ch == 'v':
builder.append_char('\v')
elif ch == 'a':
builder.append_char('\a')
elif '0' <= ch <= '7':
x = ord(ch) - ord('0')
if pos < len(s):
ch = s[pos]
if '0' <= ch <= '7':
pos += 1
x = (x << 3) + ord(ch) - ord('0')
if pos < len(s):
ch = s[pos]
if '0' <= ch <= '7':
pos += 1
x = (x << 3) + ord(ch) - ord('0')
if x > 0x7F:
builder.append_code(x)
else:
builder.append_char(chr(x))
# hex escapes
# \xXX
elif ch == 'x':
digits = 2
message = "truncated \\xXX escape"
pos, s = hexescape(builder, s, pos, digits,
"unicodeescape", errorhandler, message, errors)
# \uXXXX
elif ch == 'u':
digits = 4
message = "truncated \\uXXXX escape"
pos, s = hexescape(builder, s, pos, digits,
"unicodeescape", errorhandler, message, errors)
# \UXXXXXXXX
elif ch == 'U':
digits = 8
message = "truncated \\UXXXXXXXX escape"
pos, s = hexescape(builder, s, pos, digits,
"unicodeescape", errorhandler, message, errors)
# \N{name}
elif ch == 'N' and ud_handler is not None:
message = "malformed \\N character escape"
look = pos
if look < len(s) and s[look] == '{':
# look for the closing brace
while look < len(s) and s[look] != '}':
look += 1
if look < len(s) and s[look] == '}':
# found a name. look it up in the unicode database
message = "unknown Unicode character name"
name = s[pos + 1:look]
code = ud_handler.call(name)
if code < 0:
r, pos, rettype, s = errorhandler(
errors, "unicodeescape", message,
s, pos - 1, look + 1)
builder.append(r)
continue
pos = look + 1
builder.append_code(code)
else:
r, pos, rettype, s = errorhandler(errors, "unicodeescape",
message, s, pos - 1, look + 1)
builder.append(r)
else:
r, pos, rettype, s = errorhandler(errors, "unicodeescape",
message, s, pos - 1, look + 1)
builder.append(r)
else:
builder.append_char('\\')
builder.append_code(ord(ch))
first_escape_error_char = ch
return builder.build(), builder.getlength(), pos, first_escape_error_char
def wcharpsize2utf8(space, wcharp, size):
"""Safe version of rffi.wcharpsize2utf8.
Raises app-level ValueError if any wchar value is outside the valid
codepoint range.
"""
if _WIN32:
import pypy.interpreter.unicodehelper_win32 as win32
# wcharp is actually utf16
return win32._unibuf_to_utf8(wcharp, size)
else:
try:
return rffi.wcharpsize2utf8(wcharp, size)
except rutf8.OutOfRange as e:
raise wrap_unicode_out_of_range_error(space, e)
def wrap_unicode_out_of_range_error(space, e):
raise oefmt(space.w_ValueError,
"character %s is not in range [U+0000; U+10ffff]", 'U+%x' % e.code)
# ____________________________________________________________
# Raw unicode escape
def str_decode_raw_unicode_escape(s, errors, final=False,
errorhandler=None):
if len(s) == 0:
return '', 0, 0
builder = rutf8.Utf8StringBuilder(len(s))
pos = 0
while pos < len(s):
ch = s[pos]
# Non-escape characters are interpreted as Unicode ordinals
if ch != '\\':
builder.append_code(ord(ch))
pos += 1
continue
# \u-escapes are only interpreted iff the number of leading
# backslashes is odd
bs = pos
while pos < len(s):
pos += 1
if pos == len(s) or s[pos] != '\\':
break
builder.append_char('\\')
# we have a backslash at the end of the string, stop here
if pos >= len(s):
builder.append_char('\\')
break
if ((pos - bs) & 1 == 0 or pos >= len(s) or
(s[pos] != 'u' and s[pos] != 'U')):
builder.append_char('\\')
builder.append_code(ord(s[pos]))
pos += 1
continue
if s[pos] == 'u':
digits = 4
message = "truncated \\uXXXX escape"
else:
digits = 8
message = "truncated \\UXXXXXXXX escape"
pos += 1
pos, s = hexescape(builder, s, pos, digits,
"rawunicodeescape", errorhandler, message, errors)
return builder.build(), builder.getlength(), pos
_utf8_encode_unicode_escape = rutf8.make_utf8_escape_function()
TABLE = '0123456789abcdef'
def raw_unicode_escape_helper(result, char):
if char >= 0x10000 or char < 0:
result.append("\\U")
zeros = 8
elif char >= 0x100:
result.append("\\u")
zeros = 4
else:
result.append("\\x")
zeros = 2
for i in range(zeros-1, -1, -1):
result.append(TABLE[(char >> (4 * i)) & 0x0f])
def utf8_encode_raw_unicode_escape(s, errors, errorhandler, allow_surrogates=False):
# errorhandler is not used: this function cannot cause Unicode errors
size = len(s)
if size == 0:
return ''
result = StringBuilder(size)
pos = 0
while pos < size:
oc = rutf8.codepoint_at_pos(s, pos)
if oc < 0x100:
result.append(chr(oc))
else:
raw_unicode_escape_helper(result, oc)
pos = rutf8.next_codepoint_pos(s, pos)
return result.build()
def utf8_encode_unicode_escape(s, errors, errorhandler, allow_surrogates=False):
return _utf8_encode_unicode_escape(s)
# ____________________________________________________________
# utf-7
# Three simple macros defining base-64
def _utf7_IS_BASE64(oc):
"Is c a base-64 character?"
c = chr(oc)
return c.isalnum() or c == '+' or c == '/'
def _utf7_TO_BASE64(n):
"Returns the base-64 character of the bottom 6 bits of n"
return "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[n & 0x3f]
def _utf7_FROM_BASE64(c):
"given that c is a base-64 character, what is its base-64 value?"
if c >= 'a':
return ord(c) - 71
elif c >= 'A':
return ord(c) - 65
elif c >= '0':
return ord(c) + 4
elif c == '+':
return 62
else: # c == '/'
return 63
def _utf7_DECODE_DIRECT(oc):
return oc <= 127 and oc != ord('+')
# The UTF-7 encoder treats ASCII characters differently according to
# whether they are Set D, Set O, Whitespace, or special (i.e. none of
# the above). See RFC2152. This array identifies these different
# sets:
# 0 : "Set D"
# alphanumeric and '(),-./:?
# 1 : "Set O"
# !"#$%&*;<=>@[]^_`{|}
# 2 : "whitespace"
# ht nl cr sp
# 3 : special (must be base64 encoded)
# everything else (i.e. +\~ and non-printing codes 0-8 11-12 14-31 127)
utf7_category = [
# nul soh stx etx eot enq ack bel bs ht nl vt np cr so si
3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3,
# dle dc1 dc2 dc3 dc4 nak syn etb can em sub esc fs gs rs us
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
# sp ! " # $ % & ' ( ) * + , - . /
2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 3, 0, 0, 0, 0,
# 0 1 2 3 4 5 6 7 8 9 : ; < = > ?
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0,
# @ A B C D E F G H I J K L M N O
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# P Q R S T U V W X Y Z [ \ ] ^ _
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 1, 1, 1,
# ` a b c d e f g h i j k l m n o
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# p q r s t u v w x y z { | } ~ del
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 3,
]
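# Illustrative readings of the table above (comment only):
# utf7_category[ord('a')] == 0 (Set D, always encodable directly),
# utf7_category[ord('!')] == 1 (Set O, direct only when directO is requested),
# utf7_category[ord(' ')] == 2 (whitespace, direct only when directWS is
# requested) and utf7_category[ord('+')] == 3 (never direct; the encoder
# below special-cases a literal '+' and writes it as '+-').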
# ENCODE_DIRECT: this character should be encoded as itself. The
# answer depends on whether we are encoding set O as itself, and also
# on whether we are encoding whitespace as itself. RFC2152 makes it
# clear that the answers to these questions vary between
# applications, so this code needs to be flexible.
def _utf7_ENCODE_DIRECT(oc, directO, directWS):
return(oc < 128 and oc > 0 and
(utf7_category[oc] == 0 or
(directWS and utf7_category[oc] == 2) or
(directO and utf7_category[oc] == 1)))
def _utf7_ENCODE_CHAR(result, oc, base64bits, base64buffer):
if oc >= 0x10000:
# code first surrogate
base64bits += 16
base64buffer = (base64buffer << 16) | 0xd800 | ((oc-0x10000) >> 10)
while base64bits >= 6:
result.append(_utf7_TO_BASE64(base64buffer >> (base64bits-6)))
base64bits -= 6
# prepare second surrogate
oc = 0xDC00 | ((oc-0x10000) & 0x3FF)
base64bits += 16
base64buffer = (base64buffer << 16) | oc
while base64bits >= 6:
result.append(_utf7_TO_BASE64(base64buffer >> (base64bits-6)))
base64bits -= 6
return base64bits, base64buffer
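# Worked example (comment only): each BMP codepoint pushes 16 bits into
# base64buffer.  Starting from an empty buffer, 16 >= 6 lets the loop emit one
# base-64 character (10 bits left) and then another (4 bits left) before
# stopping; the 4 leftover bits are either combined with the next character's
# bits or, at the end of a shift section, padded out to six bits with zeros by
# the "if base64bits:" epilogue in utf8_encode_utf_7() below.  A non-BMP
# codepoint contributes both surrogates, 32 bits in total, which from an empty
# buffer emits five base-64 characters and leaves 2 bits pending.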
def str_decode_utf_7(s, errors, final=False,
errorhandler=None):
if len(s) == 0:
return '', 0, 0
inShift = False
base64bits = 0
base64buffer = 0
surrogate = 0
outsize = 0
result = StringBuilder(len(s))
pos = 0
shiftOutStartPos = 0
startinpos = 0
while pos < len(s):
ch = s[pos]
if inShift: # in a base-64 section
if _utf7_IS_BASE64(ord(ch)): #consume a base-64 character
base64buffer = (base64buffer << 6) | _utf7_FROM_BASE64(ch)
assert base64buffer >= 0
base64bits += 6
pos += 1
if base64bits >= 16:
# enough bits for a UTF-16 value
outCh = base64buffer >> (base64bits - 16)
assert outCh >= 0
base64bits -= 16
base64buffer &= (1 << base64bits) - 1 # clear high bits
assert outCh <= 0xffff
if surrogate:
# expecting a second surrogate
if outCh >= 0xDC00 and outCh <= 0xDFFF:
code = (((surrogate & 0x3FF)<<10) |
(outCh & 0x3FF)) + 0x10000
rutf8.unichr_as_utf8_append(result, code)
outsize += 1
surrogate = 0
continue
else:
rutf8.unichr_as_utf8_append(result, surrogate,
allow_surrogates=True)
outsize += 1
surrogate = 0
                            # Not done with outCh: fall through to the checks below
if outCh >= 0xD800 and outCh <= 0xDBFF:
# first surrogate
surrogate = outCh
else:
outsize += 1
assert outCh >= 0
rutf8.unichr_as_utf8_append(result, outCh, True)
else:
# now leaving a base-64 section
inShift = False
if base64bits > 0: # left-over bits
if base64bits >= 6:
# We've seen at least one base-64 character
pos += 1
msg = "partial character in shift sequence"
r, pos, rettype, s = errorhandler(errors, 'utf7',
msg, s, pos-1, pos)
reslen = rutf8.check_utf8(r, True)
outsize += reslen
result.append(r)
continue
else:
# Some bits remain; they should be zero
if base64buffer != 0:
pos += 1
msg = "non-zero padding bits in shift sequence"
r, pos, rettype, s = errorhandler(errors, 'utf7',
msg, s, pos-1, pos)
reslen = rutf8.check_utf8(r, True)
outsize += reslen
result.append(r)
continue
if surrogate and _utf7_DECODE_DIRECT(ord(ch)):
outsize += 1
rutf8.unichr_as_utf8_append(result, surrogate, True)
surrogate = 0
if ch == '-':
# '-' is absorbed; other terminating characters are
# preserved
pos += 1
elif ch == '+':
startinpos = pos
pos += 1 # consume '+'
if pos < len(s) and s[pos] == '-': # '+-' encodes '+'
pos += 1
result.append('+')
outsize += 1
elif pos < len(s) and not _utf7_IS_BASE64(ord(s[pos])):
msg = "ill-formed sequence"
r, pos, rettype, s = errorhandler(errors, 'utf7', msg, s, pos-1, pos+1)
reslen = rutf8.check_utf8(r, True)
outsize += reslen
result.append(r)
else: # begin base64-encoded section
inShift = 1
surrogate = 0
shiftOutStartPos = result.getlength()
base64bits = 0
base64buffer = 0
elif _utf7_DECODE_DIRECT(ord(ch)): # character decodes at itself
result.append(ch)
outsize += 1
pos += 1
else:
startinpos = pos
pos += 1
msg = "unexpected special character"
r, pos, rettype, s = errorhandler(errors, 'utf7', msg, s, pos-1, pos)
reslen = rutf8.check_utf8(r, True)
outsize += reslen
result.append(r)
# end of string
final_length = result.getlength()
final_size = len(s)
if inShift and final: # in shift sequence, no more to follow
inShift = 0
if (surrogate or
base64bits >= 6 or
(base64bits > 0 and base64buffer != 0)):
# if we're in an inconsistent state, that's an error
msg = "unterminated shift sequence"
r, pos, rettype, s = errorhandler(errors, 'utf7', msg, s, shiftOutStartPos, pos)
reslen = rutf8.check_utf8(r, True)
outsize += reslen
result.append(r)
final_length = result.getlength()
elif inShift:
final_size = startinpos
final_length = shiftOutStartPos # back off output
assert final_length >= 0
return result.build()[:final_length], outsize, final_size
def utf8_encode_utf_7(s, errors, errorhandler, allow_surrogates=False):
# only uses s, other arguments are ignored
size = len(s)
if size == 0:
return ''
result = StringBuilder(size)
encodeSetO = encodeWhiteSpace = False
inShift = False
base64bits = 0
base64buffer = 0
pos = 0
while pos < size:
oc = rutf8.codepoint_at_pos(s, pos)
if not inShift:
if oc == ord('+'):
result.append('+-')
elif _utf7_ENCODE_DIRECT(oc, not encodeSetO, not encodeWhiteSpace):
result.append(chr(oc))
else:
result.append('+')
inShift = True
base64bits, base64buffer = _utf7_ENCODE_CHAR(
result, oc, base64bits, base64buffer)
else:
if _utf7_ENCODE_DIRECT(oc, not encodeSetO, not encodeWhiteSpace):
# shifting out
if base64bits: # output remaining bits
result.append(_utf7_TO_BASE64(base64buffer << (6-base64bits)))
base64buffer = 0
base64bits = 0
inShift = False
## Characters not in the BASE64 set implicitly unshift the
## sequence so no '-' is required, except if the character is
## itself a '-'
if _utf7_IS_BASE64(oc) or oc == ord('-'):
result.append('-')
result.append(chr(oc))
else:
base64bits, base64buffer = _utf7_ENCODE_CHAR(
result, oc, base64bits, base64buffer)
pos = rutf8.next_codepoint_pos(s, pos)
if base64bits:
result.append(_utf7_TO_BASE64(base64buffer << (6 - base64bits)))
if inShift:
result.append('-')
return result.build()
def decode_utf8sp(space, string):
# Surrogate-preserving utf-8 decoding. Assuming there is no
# encoding error, it should always be reversible, and the reverse is
# unused encode_utf8sp().
return str_decode_utf8(string, "string", True, decode_never_raise,
allow_surrogates=True)
# ____________________________________________________________
# utf-16
BYTEORDER = sys.byteorder
BYTEORDER2 = BYTEORDER[0] + 'e' # either "le" or "be"
assert BYTEORDER2 in ('le', 'be')
def str_decode_utf_16(s, errors, final=True,
errorhandler=None):
return str_decode_utf_16_helper(s, errors, final, errorhandler,
"native")[:3]
def str_decode_utf_16_be(s, errors, final=True,
errorhandler=None):
return str_decode_utf_16_helper(s, errors, final, errorhandler, "big",
'utf16-be')[:3]
def str_decode_utf_16_le(s, errors, final=True,
errorhandler=None):
return str_decode_utf_16_helper(s, errors, final, errorhandler, "little",
'utf16-le')[:3]
def str_decode_utf_16_helper(s, errors, final=True,
errorhandler=None,
byteorder="native",
public_encoding_name='utf16'):
bo = 0
if BYTEORDER == 'little':
ihi = 1
ilo = 0
else:
ihi = 0
ilo = 1
# Check for BOM marks (U+FEFF) in the input and adjust current
# byte order setting accordingly. In native mode, the leading BOM
# mark is skipped, in all other modes, it is copied to the output
# stream as-is (giving a ZWNBSP character).
pos = 0
if byteorder == 'native':
if len(s) >= 2:
bom = (ord(s[ihi]) << 8) | ord(s[ilo])
if BYTEORDER == 'little':
if bom == 0xFEFF:
pos += 2
bo = -1
elif bom == 0xFFFE:
pos += 2
bo = 1
else:
if bom == 0xFEFF:
pos += 2
bo = 1
elif bom == 0xFFFE:
pos += 2
bo = -1
elif byteorder == 'little':
bo = -1
else:
bo = 1
if len(s) == 0:
return '', 0, 0, bo
if bo == -1:
# force little endian
ihi = 1
ilo = 0
elif bo == 1:
# force big endian
ihi = 0
ilo = 1
result = StringBuilder(len(s) // 2)
while pos < len(s):
# remaining bytes at the end? (len(s) should be even)
if len(s) - pos < 2:
if not final:
break
r, pos, rettype, s = errorhandler(errors, public_encoding_name,
"truncated data",
s, pos, len(s))
result.append(r)
if len(s) - pos < 2:
break
ch = (ord(s[pos + ihi]) << 8) | ord(s[pos + ilo])
pos += 2
if ch < 0xD800 or ch > 0xDFFF:
rutf8.unichr_as_utf8_append(result, ch)
continue
# unexpected low surrogate
elif ch >= 0xDC00:
r, pos, rettype, s = errorhandler(errors, public_encoding_name,
"illegal encoding",
s, pos - 2, pos)
result.append(r)
continue
# UTF-16 code pair:
if len(s) - pos < 2:
pos -= 2
if not final:
break
errmsg = "unexpected end of data"
r, pos, rettype, s = errorhandler(errors, public_encoding_name,
errmsg, s, pos, len(s))
result.append(r)
else:
ch2 = (ord(s[pos+ihi]) << 8) | ord(s[pos+ilo])
pos += 2
if 0xDC00 <= ch2 <= 0xDFFF:
ch = (((ch & 0x3FF)<<10) | (ch2 & 0x3FF)) + 0x10000
rutf8.unichr_as_utf8_append(result, ch)
continue
else:
r, pos, rettype, s = errorhandler(errors, public_encoding_name,
"illegal UTF-16 surrogate",
s, pos - 4, pos - 2)
result.append(r)
r = result.build()
lgt = rutf8.check_utf8(r, True)
return r, lgt, pos, bo
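# Worked example (comment only): the surrogate pair 0xD83D 0xDE00 combines in
# the branch above as
#     (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) + 0x10000
#   = ((0x3D << 10) | 0x200) + 0x10000
#   = 0xF600 + 0x10000
#   = 0x1F600
# which mirrors how utf8_encode_utf_16_helper() below splits a codepoint
# cp >= 0x10000 into 0xD800 | ((cp - 0x10000) >> 10) and
# 0xDC00 | ((cp - 0x10000) & 0x3FF).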
def _STORECHAR(result, CH, byteorder):
hi = chr(((CH) >> 8) & 0xff)
lo = chr((CH) & 0xff)
if byteorder == 'little':
result.append(lo)
result.append(hi)
else:
result.append(hi)
result.append(lo)
def utf8_encode_utf_16_helper(s, errors,
errorhandler=None,
allow_surrogates=True,
byteorder='little',
public_encoding_name='utf16'):
if len(s) == 0:
if byteorder == 'native':
result = StringBuilder(2)
_STORECHAR(result, 0xFEFF, BYTEORDER)
return result.build()
return ""
result = StringBuilder(len(s) * 2 + 2)
if byteorder == 'native':
_STORECHAR(result, 0xFEFF, BYTEORDER)
byteorder = BYTEORDER
pos = 0
index = 0
while pos < len(s):
cp = rutf8.codepoint_at_pos(s, pos)
if cp < 0xD800:
_STORECHAR(result, cp, byteorder)
elif cp >= 0x10000:
_STORECHAR(result, 0xD800 | ((cp-0x10000) >> 10), byteorder)
_STORECHAR(result, 0xDC00 | ((cp-0x10000) & 0x3FF), byteorder)
elif cp >= 0xE000 or allow_surrogates:
_STORECHAR(result, cp, byteorder)
else:
r, newindex, rettype, s = errorhandler(
errors, public_encoding_name, 'surrogates not allowed',
s, index, index+1)
if rettype == 'u':
for cp in rutf8.Utf8StringIterator(r):
if cp < 0xD800 or allow_surrogates:
_STORECHAR(result, cp, byteorder)
else:
errorhandler('strict', public_encoding_name,
'surrogates not allowed',
s, index, index+1)
else:
for ch in r:
cp = ord(ch)
if cp < 0xD800 or allow_surrogates:
_STORECHAR(result, cp, byteorder)
else:
errorhandler('strict', public_encoding_name,
'surrogates not allowed',
s, index, index+1)
if index != newindex: # Should be uncommon
index = newindex
pos = rutf8._pos_at_index(s, newindex)
continue
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
return result.build()
def utf8_encode_utf_16(s, errors,
errorhandler=None,
allow_surrogates=False):
return utf8_encode_utf_16_helper(s, errors, errorhandler,
allow_surrogates, "native",
'utf-16-' + BYTEORDER2)
def utf8_encode_utf_16_be(s, errors,
errorhandler=None,
allow_surrogates=False):
return utf8_encode_utf_16_helper(s, errors, errorhandler,
allow_surrogates, "big",
'utf-16-be')
def utf8_encode_utf_16_le(s, errors,
errorhandler=None,
allow_surrogates=False):
return utf8_encode_utf_16_helper(s, errors, errorhandler,
allow_surrogates, "little",
'utf-16-le')
# ____________________________________________________________
# utf-32
def str_decode_utf_32(s, errors, final=True,
errorhandler=None):
return str_decode_utf_32_helper(
s, errors, final, errorhandler, "native", 'utf-32-' + BYTEORDER2,
allow_surrogates=False)[:3]
def str_decode_utf_32_be(s, errors, final=True,
errorhandler=None):
return str_decode_utf_32_helper(
s, errors, final, errorhandler, "big", 'utf-32-be',
allow_surrogates=False)[:3]
def str_decode_utf_32_le(s, errors, final=True,
errorhandler=None):
return str_decode_utf_32_helper(
s, errors, final, errorhandler, "little", 'utf-32-le',
allow_surrogates=False)[:3]
BOM32_DIRECT = intmask(0x0000FEFF)
BOM32_REVERSE = intmask(0xFFFE0000)
def str_decode_utf_32_helper(s, errors, final,
errorhandler,
byteorder="native",
public_encoding_name='utf32',
allow_surrogates=True):
assert errorhandler is not None
bo = 0
if BYTEORDER == 'little':
iorder0, iorder1, iorder2, iorder3 = 0, 1, 2, 3
else:
iorder0, iorder1, iorder2, iorder3 = 3, 2, 1, 0
# Check for BOM marks (U+FEFF) in the input and adjust current
# byte order setting accordingly. In native mode, the leading BOM
# mark is skipped, in all other modes, it is copied to the output
# stream as-is (giving a ZWNBSP character).
pos = 0
if byteorder == 'native':
if len(s) >= 4:
bom = intmask(
(ord(s[iorder3]) << 24) | (ord(s[iorder2]) << 16) |
(ord(s[iorder1]) << 8) | ord(s[iorder0]))
if BYTEORDER == 'little':
if bom == BOM32_DIRECT:
pos += 4
bo = -1
elif bom == BOM32_REVERSE:
pos += 4
bo = 1
else:
if bom == BOM32_DIRECT:
pos += 4
bo = 1
elif bom == BOM32_REVERSE:
pos += 4
bo = -1
elif byteorder == 'little':
bo = -1
else:
bo = 1
if len(s) == 0:
return '', 0, 0, bo
if bo == -1:
# force little endian
iorder0, iorder1, iorder2, iorder3 = 0, 1, 2, 3
elif bo == 1:
# force big endian
iorder0, iorder1, iorder2, iorder3 = 3, 2, 1, 0
result = StringBuilder(len(s) // 4)
while pos < len(s):
# remaining bytes at the end? (len(s) should be divisible by 4)
if len(s) - pos < 4:
if not final:
break
r, pos, rettype, s = errorhandler(errors, public_encoding_name,
"truncated data",
s, pos, len(s))
result.append(r)
if len(s) - pos < 4:
break
continue
ch = ((ord(s[pos + iorder3]) << 24) | (ord(s[pos + iorder2]) << 16) |
(ord(s[pos + iorder1]) << 8) | ord(s[pos + iorder0]))
if not allow_surrogates and 0xD800 <= ch <= 0xDFFF:
            r, pos, rettype, s = errorhandler(errors, public_encoding_name,
                                              "code point in surrogate code point "
                                              "range(0xd800, 0xe000)",
                                              s, pos, pos + 4)
result.append(r)
continue
elif r_uint(ch) >= 0x110000:
r, pos, rettype, s = errorhandler(errors, public_encoding_name,
"codepoint not in range(0x110000)",
s, pos, len(s))
result.append(r)
continue
rutf8.unichr_as_utf8_append(result, ch, allow_surrogates=allow_surrogates)
pos += 4
r = result.build()
lgt = rutf8.check_utf8(r, True)
return r, lgt, pos, bo
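# Illustrative note (comment only): with byteorder == 'native' on a
# little-endian host, input starting with the bytes FF FE 00 00 is assembled
# above into bom == 0x0000FEFF == BOM32_DIRECT, so the four BOM bytes are
# skipped and bo is set to -1 (keep decoding as little-endian), while the
# byte sequence 00 00 FE FF assembles into 0xFFFE0000 == BOM32_REVERSE and
# flips the decoder to big-endian (bo == 1) instead.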
def _STORECHAR32(result, CH, byteorder):
c0 = chr(((CH) >> 24) & 0xff)
c1 = chr(((CH) >> 16) & 0xff)
c2 = chr(((CH) >> 8) & 0xff)
c3 = chr((CH) & 0xff)
if byteorder == 'little':
result.append(c3)
result.append(c2)
result.append(c1)
result.append(c0)
else:
result.append(c0)
result.append(c1)
result.append(c2)
result.append(c3)
def utf8_encode_utf_32_helper(s, errors,
errorhandler=None,
allow_surrogates=True,
byteorder='little',
public_encoding_name='utf32'):
# s is utf8
if len(s) == 0:
if byteorder == 'native':
result = StringBuilder(4)
_STORECHAR32(result, 0xFEFF, BYTEORDER)
return result.build()
return ""
result = StringBuilder(len(s) * 4 + 4)
if byteorder == 'native':
_STORECHAR32(result, 0xFEFF, BYTEORDER)
byteorder = BYTEORDER
pos = 0
index = 0
while pos < len(s):
ch = rutf8.codepoint_at_pos(s, pos)
if not allow_surrogates and 0xD800 <= ch < 0xE000:
r, newindex, rettype, obj = errorhandler(
errors, public_encoding_name, 'surrogates not allowed',
s, index, index+1)
if rettype == 'u':
for ch in rutf8.Utf8StringIterator(r):
if ch < 0xD800:
_STORECHAR32(result, ch, byteorder)
else:
errorhandler(
'strict', public_encoding_name, 'surrogates not allowed',
s, index, index+1)
else:
for ch in r:
cp = ord(ch)
if cp < 0xD800:
_STORECHAR32(result, cp, byteorder)
else:
errorhandler(
'strict', public_encoding_name, 'surrogates not allowed',
s, index, index+1)
s = obj
if index != newindex: # Should be uncommon
index = newindex
pos = rutf8._pos_at_index(s, newindex)
continue
pos = rutf8.next_codepoint_pos(s, pos)
_STORECHAR32(result, ch, byteorder)
index += 1
return result.build()
def utf8_encode_utf_32(s, errors,
errorhandler=None, allow_surrogates=True):
return utf8_encode_utf_32_helper(s, errors, errorhandler,
allow_surrogates, "native",
'utf-32-' + BYTEORDER2)
def utf8_encode_utf_32_be(s, errors,
errorhandler=None, allow_surrogates=True):
return utf8_encode_utf_32_helper(s, errors, errorhandler,
allow_surrogates, "big",
'utf-32-be')
def utf8_encode_utf_32_le(s, errors,
errorhandler=None, allow_surrogates=True):
return utf8_encode_utf_32_helper(s, errors, errorhandler,
allow_surrogates, "little",
'utf-32-le')
# ____________________________________________________________
# Charmap
ERROR_CHAR = u'\ufffe'.encode('utf8')
@specialize.argtype(4)
def str_decode_charmap(s, errors, final=False,
errorhandler=None, mapping=None):
"mapping can be a rpython dictionary, or a dict-like object."
# Default to Latin-1
if mapping is None:
return str_decode_latin_1(s, errors, final=final,
errorhandler=errorhandler)
if len(s) == 0:
return '', 0, 0
pos = 0
result = StringBuilder(len(s))
while pos < len(s):
ch = s[pos]
c = mapping.get(ord(ch), ERROR_CHAR)
if c == ERROR_CHAR:
r, pos, rettype, s = errorhandler(errors, "charmap",
"character maps to <undefined>",
s, pos, pos + 1)
result.append(r)
continue
result.append(c)
pos += 1
r = result.build()
lgt = rutf8.codepoints_in_utf8(r)
return r, lgt, pos
def utf8_encode_charmap(s, errors, errorhandler=None, mapping=None, allow_surrogates=False):
if mapping is None:
return utf8_encode_latin_1(s, errors, errorhandler=errorhandler)
if len(s) == 0:
return ''
result = StringBuilder(len(s))
pos = 0
index = 0
while pos < len(s):
ch = rutf8.codepoint_at_pos(s, pos)
c = mapping.get(ch, '')
if len(c) == 0:
# collect all unencodable chars.
startindex = index
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
while (pos < len(s) and
mapping.get(rutf8.codepoint_at_pos(s, pos), '') == ''):
pos = rutf8.next_codepoint_pos(s, pos)
index += 1
r, newindex, rettype, obj = errorhandler(errors, "charmap",
"character maps to <undefined>",
s, startindex, index)
if rettype == 'u':
for cp2 in rutf8.Utf8StringIterator(r):
ch2 = mapping.get(cp2, '')
if not ch2:
errorhandler(
"strict", "charmap", "character maps to <undefined>",
s, startindex, index)
result.append(ch2)
else:
for ch in r:
result.append(ch)
s = obj
if index != newindex: # Should be uncommon
index = newindex
pos = rutf8._pos_at_index(s, newindex)
continue
result.append(c)
index += 1
pos = rutf8.next_codepoint_pos(s, pos)
return result.build()
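# Minimal sketch (comment only, with hypothetical names): decoding uses a
# byte-value to text mapping and encoding uses the reverse direction, e.g.
#
#     decode_map = {0x61: 'A'}       # byte 0x61 ('a') decodes to 'A'
#     encode_map = {ord('A'): 'a'}   # codepoint U+0041 encodes to byte 'a'
#
# so str_decode_charmap('a', 'strict', True, some_handler, decode_map) yields
# ('A', 1, 1), while a byte missing from the mapping (the ERROR_CHAR sentinel
# above) or a codepoint mapped to '' on the encode side is routed through the
# errorhandler as "character maps to <undefined>".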
# ____________________________________________________________
# Decimal Encoder
def unicode_encode_decimal(s, errors, errorhandler=None, allow_surrogates=False):
"""Converts whitespace to ' ', decimal characters to their
corresponding ASCII digit and all other Latin-1 characters except
    \0 as-is. Characters outside this range (Unicode ordinals 1-255)
are treated as errors. This includes embedded NULL bytes.
"""
if errorhandler is None:
errorhandler = default_error_encode
result = StringBuilder(len(s))
pos = 0
i = 0
it = rutf8.Utf8StringIterator(s)
for ch in it:
if unicodedb.isspace(ch):
result.append(' ')
i += 1
continue
try:
decimal = unicodedb.decimal(ch)
except KeyError:
pass
else:
result.append(chr(48 + decimal))
i += 1
continue
if 0 < ch < 256:
result.append(chr(ch))
i += 1
continue
# All other characters are considered unencodable
start_index = i
i += 1
while not it.done():
ch = rutf8.codepoint_at_pos(s, it.get_pos())
try:
if (0 < ch < 256 or unicodedb.isspace(ch) or
unicodedb.decimal(ch) >= 0):
break
except KeyError:
# not a decimal
pass
if it.done():
break
ch = next(it)
i += 1
end_index = i
msg = "invalid decimal Unicode string"
r, pos, retype, obj = errorhandler(
errors, 'decimal', msg, s, start_index, end_index)
for ch in rutf8.Utf8StringIterator(r):
if unicodedb.isspace(ch):
result.append(' ')
continue
try:
decimal = unicodedb.decimal(ch)
except KeyError:
pass
else:
result.append(chr(48 + decimal))
continue
if 0 < ch < 256:
result.append(chr(ch))
continue
errorhandler('strict', 'decimal', msg, s, start_index, end_index)
return result.build()
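# Worked example (comment only): ARABIC-INDIC DIGIT THREE (U+0663) has
# unicodedb.decimal() == 3 and is emitted as chr(48 + 3) == '3';
# IDEOGRAPHIC SPACE (U+3000) satisfies unicodedb.isspace() and becomes ' ';
# a latin-1 character such as U+00E9 is copied through as chr(0xE9); anything
# else, including U+0000, is collected into one span and handed to the
# errorhandler with the "invalid decimal Unicode string" message.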
avg_line_length: 37.84763 | max_line_length: 100 | alphanum_fraction: 0.511333
hexsha: 5a1f4243d2a44f22c6e45dae2d06286b90d66cb6 | size: 15805 | ext: py | lang: Python
repo_path: tests/validation_test.py | repo_name: genitrust/pycoin | repo_head_hexsha: b0daefdd69fa0400cc48ae16f923f03d366db7b4 | licenses: [MIT]
stars: null | issues: null | forks: 1 (2020-05-20T09:53:27.000Z)
import binascii
import unittest
from pycoin.block import Block
from pycoin.coins.bitcoin.networks import BitcoinMainnet
from pycoin.coins.bitcoin.ScriptTools import BitcoinScriptTools
from pycoin.ecdsa.secp256k1 import secp256k1_generator
from pycoin.serialize import h2b
from pycoin.tx.Tx import Tx
class ValidationTest(unittest.TestCase):
def setUp(self):
self._key = BitcoinMainnet.extras.Key(1, secp256k1_generator)
def test_validate_multisig_tx(self):
# this is a transaction in the block chain
# the unspents are included too, so it can be validated
TX_HEX = (
"01000000025718fb915fb8b3a802bb699ddf04dd91261ef6715f5f2820a2b1b9b7e38b"
"4f27000000004a004830450221008c2107ed4e026ab4319a591e8d9ec37719cdea0539"
"51c660566e3a3399428af502202ecd823d5f74a77cc2159d8af2d3ea5d36a702fef9a7"
"edaaf562aef22ac35da401ffffffff038f52231b994efb980382e4d804efeadaee13cf"
"e01abe0d969038ccb45ec17000000000490047304402200487cd787fde9b337ab87f9f"
"e54b9fd46d5d1692aa58e97147a4fe757f6f944202203cbcfb9c0fc4e3c453938bbea9"
"e5ae64030cf7a97fafaf460ea2cb54ed5651b501ffffffff0100093d00000000001976"
"a9144dc39248253538b93d3a0eb122d16882b998145888ac0000000002000000000000"
"004751210351efb6e91a31221652105d032a2508275f374cea63939ad72f1b1e02f477"
"da782100f2b7816db49d55d24df7bdffdbc1e203b424e8cd39f5651ab938e5e4a19356"
"9e52ae404b4c00000000004751210351efb6e91a31221652105d032a2508275f374cea"
"63939ad72f1b1e02f477da7821004f0331742bbc917ba2056a3b8a857ea47ec088dd10"
"475ea311302112c9d24a7152ae")
tx = Tx.from_hex(TX_HEX)
self.assertEqual(tx.id(), "70c4e749f2b8b907875d1483ae43e8a6790b0c8397bbb33682e3602617f9a77a")
self.assertEqual(tx.bad_signature_count(), 0)
def test_validate_block_data(self):
# block 80971
block_80971_id = '00000000001126456C67A1F5F0FF0268F53B4F22E0531DC70C7B69746AF69DAC'.lower()
block_80971_data = h2b(
"01000000950A1631FB9FAC411DFB173487B9E18018B7C6F7147E78C062584100000000"
"00A881352F97F14BF191B54915AE124E051B8FE6C3922C5082B34EAD503000FC34D891"
"974CED66471B4016850A04010000000100000000000000000000000000000000000000"
"00000000000000000000000000FFFFFFFF0804ED66471B02C301FFFFFFFF0100F2052A"
"01000000434104CB6B6B4EADC96C7D08B21B29D0ADA5F29F9378978CABDB602B8B65DA"
"08C8A93CAAB46F5ABD59889BAC704925942DD77A2116D10E0274CAD944C71D3D1A6705"
"70AC0000000001000000018C55ED829F16A4E43902940D3D33005264606D5F7D555B5F"
"67EE4C033390C2EB010000008A47304402202D1BF606648EDCDB124C1254930852D991"
"88E1231715031CBEAEA80CCFD2B39A02201FA9D6EE7A1763580E342474FC1AEF59B046"
"8F98479953437F525063E25675DE014104A01F763CFBF5E518C628939158AF3DC0CAAC"
"35C4BA7BC1CE8B7E634E8CDC44E15F0296B250282BD649BAA8398D199F2424FCDCD88D"
"3A9ED186E4FD3CB9BF57CFFFFFFFFF02404B4C00000000001976A9148156FF75BEF24B"
"35ACCE3C05289A2411E1B0E57988AC00AA38DF010000001976A914BC7E692A5FFE95A5"
"96712F5ED83393B3002E452E88AC0000000001000000019C97AFDF6C9A31FFA86D71EA"
"79A079001E2B59EE408FD418498219400639AC0A010000008B4830450220363CFFAE09"
"599397B21E6D8A8073FB1DFBE06B6ACDD0F2F7D3FEA86CA9C3F605022100FA255A6ED2"
"3FD825C759EF1A885A31CAD0989606CA8A3A16657D50FE3CEF5828014104FF444BAC08"
"308B9EC97F56A652AD8866E0BA804DA97868909999566CB377F4A2C8F1000E83B49686"
"8F3A282E1A34DF78565B65C15C3FA21A0763FD81A3DFBBB6FFFFFFFF02C05EECDE0100"
"00001976A914588554E6CC64E7343D77117DA7E01357A6111B7988AC404B4C00000000"
"001976A914CA6EB218592F289999F13916EE32829AD587DBC588AC0000000001000000"
"01BEF5C9225CB9FE3DEF929423FA36AAD9980B9D6F8F3070001ACF3A5FB389A69F0000"
"00004A493046022100FB23B1E2F2FB8B96E04D220D385346290A9349F89BBBC5C225D5"
"A56D931F8A8E022100F298EB28294B90C1BAF319DAB713E7CA721AAADD8FCC15F849DE"
"7B0A6CF5412101FFFFFFFF0100F2052A010000001976A9146DDEA8071439951115469D"
"0D2E2B80ECBCDD48DB88AC00000000")
# block 80974
block_80974_id = '0000000000089F7910F6755C10EA2795EC368A29B435D80770AD78493A6FECF1'.lower()
block_80974_data = h2b(
"010000007480150B299A16BBCE5CCDB1D1BBC65CFC5893B01E6619107C552000000000"
"007900A2B203D24C69710AB6A94BEB937E1B1ADD64C2327E268D8C3E5F8B41DBED8796"
"974CED66471B204C324703010000000100000000000000000000000000000000000000"
"00000000000000000000000000FFFFFFFF0804ED66471B024001FFFFFFFF0100F2052A"
"010000004341045FEE68BAB9915C4EDCA4C680420ED28BBC369ED84D48AC178E1F5F7E"
"EAC455BBE270DABA06802145854B5E29F0A7F816E2DF906E0FE4F6D5B4C9B92940E4F0"
"EDAC000000000100000001F7B30415D1A7BF6DB91CB2A272767C6799D721A4178AA328"
"E0D77C199CB3B57F010000008A4730440220556F61B84F16E637836D2E74B8CB784DE4"
"0C28FE3EF93CCB7406504EE9C7CAA5022043BD4749D4F3F7F831AC696748AD8D8E79AE"
"B4A1C539E742AA3256910FC88E170141049A414D94345712893A828DE57B4C2054E2F5"
"96CDCA9D0B4451BA1CA5F8847830B9BE6E196450E6ABB21C540EA31BE310271AA00A49"
"ED0BA930743D1ED465BAD0FFFFFFFF0200E1F505000000001976A914529A63393D63E9"
"80ACE6FA885C5A89E4F27AA08988ACC0ADA41A000000001976A9145D17976537F30886"
"5ED533CCCFDD76558CA3C8F088AC00000000010000000165148D894D3922EF5FFDA962"
"BE26016635C933D470C8B0AB7618E869E3F70E3C000000008B48304502207F5779EBF4"
"834FEAEFF4D250898324EB5C0833B16D7AF4C1CB0F66F50FCF6E85022100B78A65377F"
"D018281E77285EFC31E5B9BA7CB7E20E015CF6B7FA3E4A466DD195014104072AD79E0A"
"A38C05FA33DD185F84C17F611E58A8658CE996D8B04395B99C7BE36529CAB7606900A0"
"CD5A7AEBC6B233EA8E0FE60943054C63620E05E5B85F0426FFFFFFFF02404B4C000000"
"00001976A914D4CAA8447532CA8EE4C80A1AE1D230A01E22BFDB88AC8013A0DE010000"
"001976A9149661A79AE1F6D487AF3420C13E649D6DF3747FC288AC00000000")
block_80971 = Block.from_bin(block_80971_data)
self.assertEqual(block_80971.id(), block_80971_id)
block_80974 = Block.from_bin(block_80974_data)
self.assertEqual(block_80974.id(), block_80974_id)
tx_db = {tx.hash(): tx for tx in block_80971.txs}
tx_to_validate = block_80974.txs[2]
self.assertEqual("OP_DUP OP_HASH160 [d4caa8447532ca8ee4c80a1ae1d230a01e22bfdb] OP_EQUALVERIFY OP_CHECKSIG",
BitcoinScriptTools.disassemble(tx_to_validate.txs_out[0].script))
self.assertEqual(tx_to_validate.id(), "7c4f5385050c18aa8df2ba50da566bbab68635999cc99b75124863da1594195b")
tx_to_validate.unspents_from_db(tx_db)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
# now, let's corrupt the Tx and see what happens
tx_out = tx_to_validate.txs_out[1]
disassembly = BitcoinScriptTools.disassemble(tx_out.script)
tx_out.script = BitcoinScriptTools.compile(disassembly)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
disassembly = disassembly.replace("9661a79ae1f6d487af3420c13e649d6df3747fc2",
"9661a79ae1f6d487af3420c13e649d6df3747fc3")
tx_out.script = BitcoinScriptTools.compile(disassembly)
self.assertEqual(tx_to_validate.bad_signature_count(), 1)
self.assertFalse(tx_to_validate.is_signature_ok(0))
def test_validate_two_inputs(self):
def tx_from_b64(h):
d = binascii.a2b_base64(h.encode("utf8"))
return Tx.from_bin(d)
# tx_0 = c9989d984c97128b03b9f118481c631c584f7aa42b578dbea6194148701b053d
# This is the one we're going to validate. It has inputs from
# tx_1 = b52201c2741d410b70688335afebba0d58f8675fa9b6c8c54becb0d7c0a75983
# and tx_2 = 72151f65db1d8594df90778639a4c0c17c1e303af01de0d04af8fac13854bbfd
TX_0_HEX = (
"AQAAAAKDWafA17DsS8XItqlfZ/hYDbrrrzWDaHALQR10wgEitQAAAACLSDBFAiAnyvQ1P7"
"b8+84JbBUbE1Xtgrd0KNpD4eyVTNU/burbtgIhAOS8T1TrhXkGXQTGbLSEJy5uvZMGEzOj"
"ITxO+DrykiPlAUEE3yJcIB5OCpaDjrop+N3bm8h9PKw8bF/YB4v3yD+VeQf4fXdUZ9hJJS"
"nFeJ+QeJrC7q3Y23QSYeYbW/AfA3D5G//////9u1Q4wfr4StDgHfA6MB58wcCkOYZ3kN+U"
"hR3bZR8VcgAAAACLSDBFAiAN6ZQr+9HTgmF57EsPyXIhQ6J5M4lgwlj/tJTShZ+toQIhAL"
"0U1i9yiCEm75uCEp8uRaySqS7P4x7A+L2Vr5kS+7ANAUEEkSqVI6gw1scM0GuJWgMh4jpW"
"KJA0yOl03uQaV/jHURn+HswOIORzvsG9qQY1/9BZgDPaMuI5U5JlyA3WkhLxgf////8Ctk"
"SUzxAAAAAZdqkULXTu3lp2t/wMSuvqbifOSj9/kvmIrAAoa+4AAAAAGXapFF3ySpVdjz9V"
"8fRKvzDqXQRcmowSiKwAAAAA")
TX_1_HEX = (
"AQAAAAEL3YmFDcZpf4SH7uN1IBmMoBd4OhmTp4EAQ8A0ZQ3tiwAAAACKRzBEAiA4Fkl8lk"
"JSeLtWHsp1j0h7y0KKFmqxhDR0CK0HnmZWBQIgDSTDenor3zbNqTs+FApeDl8DKCz1xGQC"
"JQN0/sp00VABQQQzSNc33wdDXA/F9y9/hAR88q6Se6vRCHEC7dYgbIp1pgxqGzrWXQroGk"
"QLhnAbn/fDhUoVbCgM/UHXYmjXlhdO/////wI3HGlfEQAAABl2qRRM+dhUVUjeAlb0jEsH"
"JrFClGGSZ4isMAYVCgAAAAAZdqkUgnSLXoYTeOKFFRdtLYxWcGZ2Ht2IrAAAAAA=")
TX_2_HEX = (
"AQAAAAFDjBbw61AYUWMx+3moZ2vb9dvLKydOSFIwcfBTjG0QSgEAAACKRzBEAiA5WWKhR4"
"8OI60ZDCXnOru/FH6NvuTGhRLggjbpJB2dhgIgKp0FFL0ClSCxxqGjYneDinvgROGSw6Dt"
"Vtvflrhaom8BQQR50YjAg1e5qRkP4ER29ec5jKfzk3DHJhS7Si0sEbvNIJMfjjbZfZWtJi"
"15wHZhuHh4e3G6SWMdJLHH5pgbseFh/////wLPE5deAAAAABl2qRSmRdbMvv5fEbgFD1Yk"
"taBU9zQTW4iswJ7mBQAAAAAZdqkU4E5+Is4tr+8bPU6ELYHSvz/Ng0eIrAAAAAA=")
tx_0 = tx_from_b64(TX_0_HEX)
self.assertEqual(tx_0.id(), "c9989d984c97128b03b9f118481c631c584f7aa42b578dbea6194148701b053d")
tx_1 = tx_from_b64(TX_1_HEX)
self.assertEqual(tx_1.id(), "b52201c2741d410b70688335afebba0d58f8675fa9b6c8c54becb0d7c0a75983")
tx_2 = tx_from_b64(TX_2_HEX)
self.assertEqual(tx_2.id(), "72151f65db1d8594df90778639a4c0c17c1e303af01de0d04af8fac13854bbfd")
TX_DB = {tx.hash(): tx for tx in [tx_0, tx_1, tx_2]}
tx_to_validate = tx_0
self.assertEqual("OP_DUP OP_HASH160 [2d74eede5a76b7fc0c4aebea6e27ce4a3f7f92f9] OP_EQUALVERIFY OP_CHECKSIG",
BitcoinScriptTools.disassemble(tx_to_validate.txs_out[0].script))
self.assertEqual(tx_to_validate.id(), "c9989d984c97128b03b9f118481c631c584f7aa42b578dbea6194148701b053d")
tx_to_validate.unspents_from_db(TX_DB)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
# now let's mess with signatures
disassembly = BitcoinScriptTools.disassemble(tx_to_validate.txs_in[0].script)
tx_to_validate.txs_in[0].script = BitcoinScriptTools.compile(disassembly)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
disassembly = disassembly.replace("353fb6fcfbce09", "353fb6fcfbce19")
tx_to_validate.txs_in[0].script = BitcoinScriptTools.compile(disassembly)
self.assertEqual(tx_to_validate.bad_signature_count(), 1)
self.assertFalse(tx_to_validate.is_signature_ok(0))
tx_to_validate = tx_from_b64(TX_0_HEX)
tx_to_validate.unspents_from_db(TX_DB)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
disassembly = BitcoinScriptTools.disassemble(tx_to_validate.txs_in[1].script)
disassembly = disassembly.replace("960c258ffb494d2859f", "960d258ffb494d2859f")
tx_to_validate.txs_in[1].script = BitcoinScriptTools.compile(disassembly)
self.assertEqual(tx_to_validate.bad_signature_count(), 1)
self.assertFalse(tx_to_validate.is_signature_ok(1))
# futz with signature on tx_1
tx_to_validate = tx_from_b64(TX_0_HEX)
original_tx_hash = tx_1.hash()
disassembly = BitcoinScriptTools.disassemble(tx_1.txs_out[0].script)
disassembly = disassembly.replace("4cf9d8545548de0256f48c4b0726b14294619267",
"4cf9d8545548de1256f48c4b0726b14294619267")
tx_1.txs_out[0].script = BitcoinScriptTools.compile(disassembly)
TX_DB[original_tx_hash] = tx_1
tx_to_validate.unspents_from_db(TX_DB, ignore_missing=True)
self.assertEqual(tx_to_validate.bad_signature_count(), 1)
self.assertFalse(tx_to_validate.is_signature_ok(0, ))
# fix it up again
TX_DB[original_tx_hash] = tx_from_b64(TX_1_HEX)
tx_to_validate.unspents_from_db(TX_DB)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
# futz with signature on tx_2
tx_to_validate = tx_from_b64(TX_0_HEX)
original_tx_hash = tx_2.hash()
disassembly = BitcoinScriptTools.disassemble(tx_2.txs_out[0].script)
disassembly = disassembly.replace("a645d6ccbefe5f11b8050f5624b5a054f734135b",
"a665d6ccbefe5f11b8050f5624b5a054f734135b")
tx_2.txs_out[0].script = BitcoinScriptTools.compile(disassembly)
TX_DB[original_tx_hash] = tx_2
tx_to_validate.unspents_from_db(TX_DB, ignore_missing=True)
self.assertEqual(tx_to_validate.bad_signature_count(), 1)
self.assertFalse(tx_to_validate.is_signature_ok(1))
# fix it up again
TX_DB[original_tx_hash] = tx_from_b64(TX_2_HEX)
tx_to_validate.unspents_from_db(TX_DB)
self.assertEqual(tx_to_validate.bad_signature_count(), 0)
def _make_tx(self, input_script, other_scripts=[]):
from pycoin.tx.tx_utils import create_signed_tx
from pycoin.solve.utils import build_p2sh_lookup
cv = int(50*1e8)
key = self._key
sec = key.sec()
wif = key.wif()
address = key.address()
p2sh_lookup = build_p2sh_lookup(other_scripts)
coinbase_tx = Tx.coinbase_tx(public_key_sec=sec, coin_value=cv)
coinbase_tx.txs_out[0].script = input_script
spendable = coinbase_tx.tx_outs_as_spendable()[0]
payables = [(address, cv)]
tx = create_signed_tx(spendables=[spendable], payables=payables, wifs=[wif], p2sh_lookup=p2sh_lookup)
tx.unspents = [spendable]
print(tx.as_hex(include_unspents=True))
return tx
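    # Illustrative note (comment only): _make_tx() above fakes a funding
    # transaction by rewriting a coinbase output's script to the script under
    # test and then spends it with create_signed_tx().  The p2sh_lookup built
    # from other_scripts is what lets the signer recover the underlying
    # redeem script from its hash when the output is pay-to-script-hash (or
    # its segwit variant), which is exactly what the p2s / p2s_wit tests
    # below exercise.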
def test_validate_p2pkh(self):
us_1 = BitcoinMainnet.ui._script_info.script_for_p2pkh(self._key.hash160())
tx = self._make_tx(us_1)
tx.check_solution(0)
def test_validate_p2s_of_p2pkh(self):
us_1 = BitcoinMainnet.ui._script_info.script_for_p2pkh(self._key.hash160())
us_2 = BitcoinMainnet.ui._script_info.script_for_p2s(us_1)
tx = self._make_tx(us_2, [us_1])
tx.check_solution(0)
def test_validate_p2pkh_wit(self):
us_1 = BitcoinMainnet.ui._script_info.script_for_p2pkh_wit(self._key.hash160())
tx = self._make_tx(us_1)
tx.check_solution(0)
def test_validate_p2s_wit_of_p2pkh(self):
us_1 = BitcoinMainnet.ui._script_info.script_for_p2pkh_wit(self._key.hash160())
us_2 = BitcoinMainnet.ui._script_info.script_for_p2s(us_1)
tx = self._make_tx(us_2, [us_1])
self.assertEqual(tx.id(), "1e5d967a3778bfa4e0d90f35f59530e8033a36bd7fd1d9e617c504054b89bd3a")
tx.check_solution(0)
def test_validate_p2s_of_p2s_wit_of_p2pkh(self):
us_1 = BitcoinMainnet.ui._script_info.script_for_p2pkh(self._key.hash160())
us_2 = BitcoinMainnet.ui._script_info.script_for_p2s_wit(us_1)
us_3 = BitcoinMainnet.ui._script_info.script_for_p2s(us_2)
tx = self._make_tx(us_3, [us_1, us_2])
self.assertEqual(tx.id(), "54a518b82b464744951531270c1bcec133c515fcdbe9d70c6141e067a62ff640")
tx.check_solution(0)
if __name__ == "__main__":
unittest.main()
avg_line_length: 56.648746 | max_line_length: 115 | alphanum_fraction: 0.754761
hexsha: fee3ccaa81c7f41f616d76f2fc90765e03d5a455 | size: 4876 | ext: py | lang: Python
repo_path: smart_citekeys.py | repo_name: tsutterley/reference-toolkit | repo_head_hexsha: b5cc6bcf78ca072d7ec1a588e73ab3d01d8b577d | licenses: [MIT]
stars: 3 (2020-12-01 to 2021-04-05) | issues: 1 (2020-11-30 to 2020-12-02) | forks: 1 (2022-02-04)
#!/usr/bin/env python
u"""
smart_citekeys.py (12/2020)
Generates Papers2-like cite keys for BibTeX using information from crossref.org
Enter DOI's of journals to generate "universal" keys
CALLING SEQUENCE:
python smart_citekeys.py "10.1038/ngeo102"
will result in Rignot:2008ct as the citekey
PYTHON DEPENDENCIES:
future: Compatibility layer between Python 2 and Python 3
http://python-future.org/
PROGRAM DEPENDENCIES:
language_conversion.py: Outputs map for converting symbols between languages
NOTES:
Papers2 Universal Citekey generation javascript
https://github.com/cparnot/universal-citekey-js
Check unicode characters with http://www.fileformat.info/
UPDATE HISTORY:
Updated 12/2020: using argparse to set command line options
Updated 07/2019: modifications for python3 string compatibility
Updated 07/2018: using python3 urllib.request with future library
Updated 10/2017: use modulus of 0xffffffff (4294967295)
Updated 09/2017: use timeout of 20 to prevent socket.timeout
Forked 05/2017 from gen_citekeys.py to use information from crossref.org
Updated 05/2017: removing whitespace from authors.
Converting special characters with language_conversion program
Updated 02/2017: universal citekeys from DOI or title hashes
(will create the same citekeys as the Papers2 application)
Written 02/2017
"""
from __future__ import print_function
import future.standard_library
import sys
import os
import re
import ssl
import math
import json
import binascii
import argparse
import posixpath
from language_conversion import language_conversion
with future.standard_library.hooks():
import urllib.request
import urllib.parse
#-- PURPOSE: check internet connection and URL
def check_connection(doi):
#-- attempt to connect to remote url
remote_url = posixpath.join('https://api.crossref.org','works',
urllib.parse.quote_plus(doi))
try:
urllib.request.urlopen(remote_url,timeout=20,context=ssl.SSLContext())
except urllib.request.HTTPError:
raise RuntimeError('Check URL: {0}'.format(remote_url))
except urllib.request.URLError:
raise RuntimeError('Check internet connection')
else:
return True
#-- PURPOSE: create a Papers2-like cite key using the DOI
def smart_citekey(doi):
#-- open connection with crossref.org for DOI
crossref=posixpath.join('https://api.crossref.org','works',
urllib.parse.quote_plus(doi))
request=urllib.request.Request(url=crossref)
response=urllib.request.urlopen(request,timeout=60,context=ssl.SSLContext())
resp=json.loads(response.read())
#-- get author and replace unicode characters in author with plain text
author = resp['message']['author'][0]['family']
if sys.version_info[0] == 2:
author = author.decode('unicode-escape')
#-- check if author fields are initially uppercase: change to title
author = author.title() if author.isupper() else author
#-- 1st column: latex, 2nd: combining unicode, 3rd: unicode, 4th: plain text
for LV, CV, UV, PV in language_conversion():
author = author.replace(UV, PV)
#-- replace symbols
author = re.sub(rb"\s|\-|'", b'', author.encode('utf-8')).decode('utf-8')
#-- get publication date (prefer date when in print)
if 'published-print' in resp['message'].keys():
date_parts, = resp['message']['published-print']['date-parts']
elif 'published-online' in resp['message'].keys():
date_parts, = resp['message']['published-online']['date-parts']
#-- extract year from date parts
year = date_parts[0]
#-- create citekey suffix using a DOI-based universal citekey
#-- convert to unsigned 32-bit int if needed
crc = binascii.crc32(doi.encode('utf-8')) & 0xffffffff
#-- generate individual hashes
hash1 = chr(int(ord('b') + math.floor((crc % (10*26))/26)))
hash2 = chr(int(ord('a') + (crc % 26)))
#-- concatenate to form DOI-based universal citekey suffix
key = hash1 + hash2
#-- return the final citekey from the function
return '{0}:{1:4d}{2}'.format(author,year,key)
#-- main program that calls smart_citekey()
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Generates Papers2-like cite keys for BibTeX using
information from crossref.org
"""
)
#-- command line parameters
parser.add_argument('doi',
type=str, nargs='+',
help='Digital Object Identifier (DOI) of the publication')
args = parser.parse_args()
#-- run for each DOI entered after the program
for doi in args.doi:
if check_connection(doi):
citekey = smart_citekey(doi)
print(citekey)
#-- run main program
if __name__ == '__main__':
main()
| 37.221374 | 80 | 0.701395 |
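The citekey suffix in the script above depends only on a CRC-32 of the DOI, so it can be reproduced offline without the crossref lookup. A minimal sketch of just that step; the DOI and the expected Rignot:2008ct result are taken from the script's own docstring:

import binascii
import math

def citekey_suffix(doi):
    # CRC-32 of the DOI, masked to an unsigned 32-bit value
    crc = binascii.crc32(doi.encode('utf-8')) & 0xffffffff
    # first character drawn from 'b'..'k', second from 'a'..'z'
    hash1 = chr(int(ord('b') + math.floor((crc % (10 * 26)) / 26)))
    hash2 = chr(int(ord('a') + (crc % 26)))
    return hash1 + hash2

# docstring example: 10.1038/ngeo102 -> Rignot:2008ct, i.e. suffix 'ct'
print(citekey_suffix('10.1038/ngeo102'))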
9ca7cc41881edcbd76262cf3831c1bfd691f61d9 | 3,725 | py | Python | DQM/SiStripMonitorClient/python/SiStripClientConfig_Tier0_cff.py | emily-tsai11/cmssw | 5adce1b6173b638caf2c7f6ce1e8c1c090296bbf | ["Apache-2.0"] | null | null | null | DQM/SiStripMonitorClient/python/SiStripClientConfig_Tier0_cff.py | emily-tsai11/cmssw | 5adce1b6173b638caf2c7f6ce1e8c1c090296bbf | ["Apache-2.0"] | null | null | null | DQM/SiStripMonitorClient/python/SiStripClientConfig_Tier0_cff.py | emily-tsai11/cmssw | 5adce1b6173b638caf2c7f6ce1e8c1c090296bbf | ["Apache-2.0"] | null | null | null |
import FWCore.ParameterSet.Config as cms
# SiStripOfflineDQM (for Tier0 Harvesting Step) ####
siStripOfflineAnalyser = cms.EDProducer("SiStripOfflineDQM",
GlobalStatusFilling = cms.untracked.int32(2),
CreateSummary = cms.untracked.bool(False),
SummaryConfigPath = cms.untracked.string("DQM/SiStripMonitorClient/data/sistrip_monitorelement_config.xml"),
UsedWithEDMtoMEConverter = cms.untracked.bool(True),
PrintFaultyModuleList = cms.untracked.bool(True),
CreateTkMap = cms.untracked.bool(False),
TrackRatePSet = cms.PSet(
Name = cms.string("NumberOfGoodTracks_"),
LowerCut = cms.double(1.0),
UpperCut = cms.double(1000.0),
),
TrackChi2PSet = cms.PSet(
Name = cms.string("GoodTrackChi2oNDF_"),
LowerCut = cms.double(0.0),
UpperCut = cms.double(25.0),
),
TrackHitPSet = cms.PSet(
Name = cms.string("GoodTrackNumberOfRecHitsPerTrack_"),
LowerCut = cms.double(5.0),
UpperCut = cms.double(20.0),
),
GoodTrackFractionPSet = cms.PSet(
Name = cms.string("FractionOfGoodTracks_"),
LowerCut = cms.double(0.85),
UpperCut = cms.double(1.1),
)
)
from DQMServices.Core.DQMQualityTester import DQMQualityTester
siStripQTester = DQMQualityTester(
qtList = cms.untracked.FileInPath('DQM/SiStripMonitorClient/data/sistrip_qualitytest_config_tier0.xml'),
prescaleFactor = cms.untracked.int32(1),
getQualityTestsFromFile = cms.untracked.bool(True)
)
from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA
pp_on_AA.toModify(siStripQTester,
qtList = cms.untracked.FileInPath('DQM/SiStripMonitorClient/data/sistrip_qualitytest_config_tier0_heavyions.xml')
)
from CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi import siStripQualityESProducer
mergedSiStripQualityProducer = siStripQualityESProducer.clone(
# names and designations
ListOfRecordToMerge = cms.VPSet(
cms.PSet(record = cms.string("SiStripDetVOffRcd"), tag = cms.string('')), # DCS information
cms.PSet(record = cms.string('SiStripDetCablingRcd'), tag = cms.string('')), # Use Detector cabling information to exclude detectors not connected
cms.PSet(record = cms.string('SiStripBadChannelRcd'), tag = cms.string('')), # Online Bad components
cms.PSet(record = cms.string('SiStripBadFiberRcd'), tag = cms.string('')), # Bad Channel list from the selected IOV as done at PCL
# BadChannel list from FED errors is included below
cms.PSet(record = cms.string('RunInfoRcd'), tag = cms.string('')) # List of FEDs excluded during data taking
)
)
mergedSiStripQualityProducer.ReduceGranularity = cms.bool(False)
mergedSiStripQualityProducer.ThresholdForReducedGranularity = cms.double(0.3)
mergedSiStripQualityProducer.appendToDataLabel = 'MergedBadComponent'
siStripBadComponentInfo = cms.EDProducer("SiStripBadComponentInfo",
StripQualityLabel = cms.string('MergedBadComponent'),
AddBadComponentsFromFedErrors = cms.untracked.bool(True),
FedErrorBadComponentsCutoff = cms.untracked.double(0.8)
)
# Sequence
SiStripOfflineDQMClient = cms.Sequence(siStripQTester*siStripOfflineAnalyser*siStripBadComponentInfo)
#removed modules using TkDetMap
#SiStripOfflineDQMClient = cms.Sequence(siStripQTester)
# Services needed for TkHistoMap
from CalibTracker.SiStripCommon.TkDetMapESProducer_cfi import *
| 49.013158 | 166 | 0.68349 |
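A harvesting job would typically load the fragment above and schedule the SiStripOfflineDQMClient sequence in a path. The snippet below is only a rough sketch under that assumption; the process name and path label are illustrative, not taken from an actual CMSSW workflow:

import FWCore.ParameterSet.Config as cms

process = cms.Process('HARVESTING')
# pull in the client configuration defined in the fragment above
process.load('DQM.SiStripMonitorClient.SiStripClientConfig_Tier0_cff')
# run the SiStrip offline DQM client sequence
process.siStripClientPath = cms.Path(process.SiStripOfflineDQMClient)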
143ee996dd8f53467e51b1b29f17e1b51675a685 | 3,246 | py | Python | conceptBert/fusion_modules/ban_model/bcnet.py | ThalesGroup/ConceptBERT | 0fb558af7df8c61be47bcf278e30cdf10315b572 | ["Apache-2.0", "MIT"] | 16 | 2021-07-23T13:15:12.000Z | 2022-03-21T08:04:36.000Z | conceptBert/fusion_modules/ban_model/bcnet.py | ThalesGroup/ConceptBERT | 0fb558af7df8c61be47bcf278e30cdf10315b572 | ["Apache-2.0", "MIT"] | 12 | 2021-07-28T04:32:32.000Z | 2022-01-05T05:53:27.000Z | conceptBert/fusion_modules/ban_model/bcnet.py | ThalesGroup/ConceptBERT | 0fb558af7df8c61be47bcf278e30cdf10315b572 | ["Apache-2.0", "MIT"] | 7 | 2021-07-23T12:55:35.000Z | 2022-01-24T07:36:27.000Z |
### LIBRARIES ###
# Global libraries
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
# Custom libraries
from conceptBert.conceptbert_models import FCNet
### CLASS DEFINITION ###
class BCNet(nn.Module):
"""
Simple class for non-linear bilinear connect network
"""
def __init__(
self, q1_dim, q2_dim, h_dim, h_out, act="ReLU", dropout=[0.2, 0.5], k=1
):
super(BCNet, self).__init__()
self.c = 32
self.k = k
self.q1_dim = q1_dim
self.q2_dim = q2_dim
self.h_dim = h_dim
self.h_out = h_out
self.q1_net = FCNet([q1_dim, h_dim * self.k], act=act, dropout=dropout[0])
self.q2_net = FCNet([q2_dim, h_dim * self.k], act=act, dropout=dropout[0])
self.dropout = nn.Dropout(dropout[1]) # attention
if 1 < k:
self.p_net = nn.AvgPool1d(self.k, stride=self.k)
if None == h_out:
pass
elif h_out <= self.c:
self.h_mat = nn.Parameter(
torch.Tensor(1, h_out, 1, h_dim * self.k).normal_()
)
self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
else:
self.h_net = weight_norm(nn.Linear(h_dim * self.k, h_out), dim=None)
def forward(self, q1, q2):
if None == self.h_out:
q1_ = self.q1_net(q1).transpose(1, 2).unsqueeze(3)
q2_ = self.q2_net(q2).transpose(1, 2).unsqueeze(2)
d_ = torch.matmul(q1_, q2_) # b x h_dim x v x q
logits = d_.transpose(1, 2).transpose(2, 3) # b x v x q x h_dim
return logits.sum(1).sum(1).unsqueeze(1)
# broadcast Hadamard product, matrix-matrix production
# fast computation but memory inefficient
# epoch 1, time: 157.84
elif self.h_out <= self.c:
q1_ = self.dropout(self.q1_net(q1)).unsqueeze(1)
q2_ = self.q2_net(q2)
h_ = q1_ * self.h_mat  # broadcast the dropped-out q1 branch, b x h_out x v x h_dim
logits = torch.matmul(
h_, q2_.unsqueeze(1).transpose(2, 3)
) # b x h_out x v x q
logits = logits + self.h_bias
return logits # b x h_out x v x q
# batch outer product, linear projection
# memory efficient but slow computation
# epoch 1, time: 304.87
else:
q1_ = self.dropout(self.q1_net(q1)).transpose(1, 2).unsqueeze(3)
q2_ = self.q2_net(q2).transpose(1, 2).unsqueeze(2)
d_ = torch.matmul(q1_, q2_) # b x h_dim x v x q
logits = self.h_net(d_.transpose(1, 2).transpose(2, 3)) # b x v x q x h_out
return logits.transpose(2, 3).transpose(1, 2) # b x h_out x v x q
def forward_with_weights(self, q1, q2, w):
q1_ = self.q1_net(q1).transpose(1, 2).unsqueeze(2) # b x d x 1 x v
q2_ = self.q2_net(q2).transpose(1, 2).unsqueeze(3) # b x d x q x 1
logits = torch.matmul(torch.matmul(q1_, w.unsqueeze(1)), q2_) # b x d x 1 x 1
logits = logits.squeeze(3).squeeze(2)
if 1 < self.k:
logits = logits.unsqueeze(1) # b x 1 x d
logits = self.p_net(logits).squeeze(1) * self.k # sum-pooling
return logits
| 38.642857 | 88 | 0.566852 |
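As a quick shape check for the BCNet class above, it can be driven with random tensors; the sizes are illustrative and the example assumes the conceptBert package (which provides FCNet) is importable:

import torch
from conceptBert.fusion_modules.ban_model.bcnet import BCNet

# illustrative sizes: batch of 2, 36 "visual" positions, 14 question tokens
q1 = torch.randn(2, 36, 512)   # b x v x q1_dim
q2 = torch.randn(2, 14, 768)   # b x q x q2_dim

net = BCNet(q1_dim=512, q2_dim=768, h_dim=1024, h_out=8)
logits = net(q1, q2)
print(logits.shape)  # expected: torch.Size([2, 8, 36, 14]), i.e. b x h_out x v x q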
e8de135eb9760ec8f1bf582c517bbf4212139055 | 43,735 | py | Python | gs/key.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | ["MIT"] | null | null | null | gs/key.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | ["MIT"] | null | null | null | gs/key.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | ["MIT"] | null | null | null |
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
from boto.compat import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
self.metageneration = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
def handle_restore_headers(self, response):
return
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
elif key == 'x-goog-storage-class':
self.storage_class = value
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
# For GCS we need to include the object generation in the query args.
# The rest of the processing is handled in the parent class.
if self.generation:
if query_args:
query_args += '&'
query_args += 'generation=%s' % self.generation
super(Key, self).open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries,
response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File-like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to GCS and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Less bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None, rewind=False,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: The file whose contents are to be uploaded.
:type headers: dict
:param headers: (optional) Additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: (optional) If this parameter is False, the method will
first check to see if an object exists in the bucket with the same
key. If it does, it won't overwrite it. The default value is True
which will overwrite the object.
:type cb: function
:param cb: (optional) Callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: (optional) A canned ACL policy that will be applied to
the new key in GS.
:type md5: tuple
:param md5: (optional) A tuple containing the hexdigest version of the
MD5 checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
If you need to compute the MD5 for any reason prior to upload, it's
silly to have to do it twice so this param, if present, will be
used as the MD5 values of the file. Otherwise, the checksum will be
computed.
:type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
:param res_upload_handler: (optional) If provided, this handler will
perform the upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from the
file pointer (fp). This is useful when uploading a file in multiple
parts where you are splitting the file up into different ranges to
be uploaded. If not specified, the default behaviour is to read all
bytes from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when a
resumable upload handler is given but is still useful for
uploading part of a file as implemented by the parent class.
2. At present Google Cloud Storage does not support multipart
uploads.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from it. The default
behaviour is False which reads from the current position of the
file pointer (fp).
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
:rtype: int
:return: The number of bytes written to the key.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
raise BotoClientError(
'"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
# object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if size:
self.size = size
else:
# If md5 is provided, we still need the size, so
# calculate based on bytes to end of content
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS.
:type headers: dict
:param headers: (optional) Additional headers to pass along with the
request to GS.
:type replace: bool
:param replace: (optional) If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: (optional) Callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :py:attribute:`boto.gs.acl.CannedACLStrings`
:param policy: (optional) A canned ACL policy that will be applied to
the new key in GS.
:type md5: tuple
:param md5: (optional) A tuple containing the hexdigest version of the
MD5 checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
If you need to compute the MD5 for any reason prior to upload, it's
silly to have to do it twice so this param, if present, will be
used as the MD5 values of the file. Otherwise, the checksum will be
computed.
:type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
:param res_upload_handler: (optional) If provided, this handler will
perform the upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
def compose(self, components, content_type=None, headers=None):
"""Create a new object from a sequence of existing objects.
The content of the object representing this Key will be the
concatenation of the given object sequence. For more detail, visit
https://developers.google.com/storage/docs/composite-objects
:type components: list of Keys
:param components: List of gs.Keys representing the component objects
:type content_type: string
:param content_type: (optional) Content type for the new composite object.
"""
compose_req = []
for key in components:
if key.bucket.name != self.bucket.name:
raise BotoClientError(
'GCS does not support inter-bucket composing')
generation_tag = ''
if key.generation:
generation_tag = ('<Generation>%s</Generation>'
% str(key.generation))
compose_req.append('<Component><Name>%s</Name>%s</Component>' %
(key.name, generation_tag))
compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
''.join(compose_req))
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
resp = self.bucket.connection.make_request(
'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
headers=headers, query_args='compose',
data=get_utf8_value(compose_req_xml))
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
# Return the generation so that the result URI can be built with this
# for automatic parallel uploads.
return resp.getheader('x-goog-generation')
| 46.085353 | 102 | 0.598857 |
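A brief usage sketch for the Key class above; the bucket name and file paths are invented, and connect_gs/get_bucket come from the wider boto package rather than this module:

import boto
from boto.gs.key import Key

conn = boto.connect_gs()                    # credentials read from the boto config
bucket = conn.get_bucket('example-bucket')  # hypothetical bucket name

# upload a local file, but only if the object does not already exist (generation 0)
key = Key(bucket, 'reports/2020.csv')
key.set_contents_from_filename('/tmp/2020.csv', if_generation=0)

# fetch it back and inspect the generation recorded by the service
data = bucket.get_key('reports/2020.csv').get_contents_as_string()
print(key.generation)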
645f00183e80caeff23b16a2eba7b9ed20a9dbc7 | 281 | py | Python | searcher.py | nikhilranjan7/Subtitle-Downloader | ac0184cc785f7e2b2057be3f7f1ab43a27e373d2 | ["MIT"] | null | null | null | searcher.py | nikhilranjan7/Subtitle-Downloader | ac0184cc785f7e2b2057be3f7f1ab43a27e373d2 | ["MIT"] | null | null | null | searcher.py | nikhilranjan7/Subtitle-Downloader | ac0184cc785f7e2b2057be3f7f1ab43a27e373d2 | ["MIT"] | null | null | null |
import re
import sys
import requests

# Build the search query from the command-line arguments
query = ' '.join(sys.argv[1:])
s = requests.get("https://subscene.com/subtitles/title?q=" + query)
# Extract the relative path of the first subtitle result from the search page
found = re.search(r'<a href="/subtitles/(.*)"', s.text)
link2 = 'https://subscene.com/subtitles/' + found.group(1) + '/english/'
print(found.group(1))
| 31.222222 | 75 | 0.676157 |
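To show what the regular expression in searcher.py extracts, here is a self-contained check against a hand-written HTML fragment (the fragment is invented, not real subscene.com markup):

import re

html = '<a href="/subtitles/the-dark-knight">The Dark Knight</a>'
found = re.search(r'<a href="/subtitles/(.*)"', html)
print(found.group(1))  # the-dark-knight
print('https://subscene.com/subtitles/' + found.group(1) + '/english/')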
4e6bac55547f3a1b94e2b68a9e652f442d4106a9 | 909 | py | Python | Examples/AppKit/CocoaBindings/Bookmarks/DNDTableView.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | ["MIT"] | null | null | null | Examples/AppKit/CocoaBindings/Bookmarks/DNDTableView.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | ["MIT"] | null | null | null | Examples/AppKit/CocoaBindings/Bookmarks/DNDTableView.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | ["MIT"] | null | null | null |
#
# DNDTableView.py
# Bookmarks
#
# Converted by u.fiedler on 10.02.05.
#
# The original version was written in Objective-C by Malcolm Crawford
# at http://homepage.mac.com/mmalc/CocoaExamples/controllers.html
from AppKit import NSDragOperationLink, NSTableView
from objc import super
class DNDTableView(NSTableView):
def draggingSourceOperationMaskForLocal_(self, flag):
# This is a bug fix. See
# file:///Developer/ADC%20Reference%20Library/documentation/Cocoa/Conceptual/DragandDrop/Tasks/faq.html#//apple_ref/doc/uid/20002248/BBCGGBHE # noqa: B950
# or http://developer.apple.com/documentation/Cocoa/Conceptual/DragandDrop/Tasks/faq.html#//apple_ref/doc/uid/20002248/BBCFIJGF # noqa: B950
if not flag:
return NSDragOperationLink # link for external dragged URLs
return super(DNDTableView, self).draggingSourceOperationMaskForLocal_(flag)
| 41.318182 | 163 | 0.750275 |