hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
338ad922689a64163b4057a690691d34c77177f5 | 1,475 | py | Python | venv/lib/python3.6/site-packages/pylint/test/functional/singledispatch_functions_py3.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/pylint/test/functional/singledispatch_functions_py3.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | 1 | 2021-06-01T23:32:38.000Z | 2021-06-01T23:32:38.000Z | venv/lib/python3.6/site-packages/pylint/test/functional/singledispatch_functions_py3.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | # pylint: disable=missing-docstring,import-error,unused-import,assignment-from-no-return
# pylint: disable=invalid-name, too-few-public-methods, useless-object-inheritance
from __future__ import print_function
from UNINFERABLE import uninferable_func
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
my_single_dispatch = singledispatch
fake_singledispatch_decorator = FakeSingleDispatch()
@singledispatch
@func.register(str)
@func.register(float)
@func.register(int)
@my_single_dispatch
@func2.register(int)
@singledispatch
@with_extra_arg.register(str)
@fake_singledispatch_decorator
@fake_singledispatch_decorator.register(str)
@fake_singledispatch_decorator.register(str)
| 18.910256 | 88 | 0.747119 | # pylint: disable=missing-docstring,import-error,unused-import,assignment-from-no-return
# pylint: disable=invalid-name, too-few-public-methods, useless-object-inheritance
from __future__ import print_function
from UNINFERABLE import uninferable_func
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
my_single_dispatch = singledispatch
class FakeSingleDispatch(object):
@staticmethod
def register(function):
return function
def __call__(self, function):
return function
fake_singledispatch_decorator = FakeSingleDispatch()
@singledispatch
def func(arg):
return arg
@func.register(str)
def _(arg):
return 42
@func.register(float)
@func.register(int)
def _(arg):
return 42
@my_single_dispatch
def func2(arg):
return arg
@func2.register(int)
def _(arg):
return 42
@singledispatch
def with_extra_arg(arg, verbose=False):
if verbose:
print(arg)
return arg
@with_extra_arg.register(str)
def _(arg, verbose=False):
unused = 42 # [unused-variable]
return arg[::-1]
@fake_singledispatch_decorator
def not_single_dispatch(arg): # [unused-argument]
return "not yet implemented"
@fake_singledispatch_decorator.register(str)
def bad_single_dispatch(arg): # [unused-argument]
return 42
@fake_singledispatch_decorator.register(str)
def bad_single_dispatch(arg): # [unused-argument, function-redefined]
return 24
| 386 | 83 | 243 |
1245d0c694729576145ea29bdcd9014b33589e31 | 587 | py | Python | Ex071-Simulador de CAIXA ELETRONICO.py | andersontmachado/ExerciciosPython | ebd93eb4127dadedee8b719ccc4bc20fc151d0ad | [
"MIT"
] | 1 | 2020-04-30T14:47:15.000Z | 2020-04-30T14:47:15.000Z | Ex071-Simulador de CAIXA ELETRONICO.py | andersontmachado/exerciciospython | ebd93eb4127dadedee8b719ccc4bc20fc151d0ad | [
"MIT"
] | null | null | null | Ex071-Simulador de CAIXA ELETRONICO.py | andersontmachado/exerciciospython | ebd93eb4127dadedee8b719ccc4bc20fc151d0ad | [
"MIT"
] | null | null | null | print('='*30)
print('{:^30}'.format('BANCO ANDERSON'))
print('='*30)
saque=int(input('Qual valor você quer sacar?R$'))
total=saque
céd=100
totalcéd=0
cont=0
while True:
if total>=céd:
total-=céd
totalcéd+=1
else:
if totalcéd>0:
print(f'Total de {totalcéd} cédulas de R${céd}')
if céd==100:
céd=50
elif céd==50:
céd=20
elif céd ==20:
céd=10
totalcéd=0
if total ==0:
break
print('='*50)
print('TESTANDO.....Muito Obrigado por utilizar o BANCO ANDERSON')
| 19.566667 | 66 | 0.531516 | print('='*30)
print('{:^30}'.format('BANCO ANDERSON'))
print('='*30)
saque=int(input('Qual valor você quer sacar?R$'))
total=saque
céd=100
totalcéd=0
cont=0
while True:
if total>=céd:
total-=céd
totalcéd+=1
else:
if totalcéd>0:
print(f'Total de {totalcéd} cédulas de R${céd}')
if céd==100:
céd=50
elif céd==50:
céd=20
elif céd ==20:
céd=10
totalcéd=0
if total ==0:
break
print('='*50)
print('TESTANDO.....Muito Obrigado por utilizar o BANCO ANDERSON')
| 0 | 0 | 0 |
db61429f15b9344ab77c00e6e51a776f9c8e4958 | 996 | py | Python | Part1-Basics/c_own_code.py | adrianaaguirresuch/Programming_with_python_2021 | 0bef696749159fe74fe759a7322aa4e141da2952 | [
"Apache-2.0"
] | null | null | null | Part1-Basics/c_own_code.py | adrianaaguirresuch/Programming_with_python_2021 | 0bef696749159fe74fe759a7322aa4e141da2952 | [
"Apache-2.0"
] | null | null | null | Part1-Basics/c_own_code.py | adrianaaguirresuch/Programming_with_python_2021 | 0bef696749159fe74fe759a7322aa4e141da2952 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
##################################################
# This script shows an example of a header, library and code section.
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# End of header section
import sys
# depending on the complexity of your script you will have a longer list of libraries
sys.stdout.write("This is an script with three sections \n\n")
sys.stdout.write("Header section using '#' characters\n")
sys.stdout.write("library section using import/from ... import commands\n")
sys.stdout.write("Code section calling the 'sys' library to show you this text\n")
| 36.888889 | 97 | 0.603414 | # encoding: utf-8
##################################################
# This script shows an example of a header, library and code section.
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# End of header section
import sys
# depending on the complexity of your script you will have a longer list of libraries
sys.stdout.write("This is an script with three sections \n\n")
sys.stdout.write("Header section using '#' characters\n")
sys.stdout.write("library section using import/from ... import commands\n")
sys.stdout.write("Code section calling the 'sys' library to show you this text\n")
| 0 | 0 | 0 |
fcde5d5fc6a79c407f6a9038a0300c83a7e63f6a | 13,569 | py | Python | calabiyau/cmd/radius.py | TachyonicProject/calabiyau | 415a8ada4a93ee84c4776e89c9442af328dcfdd6 | [
"BSD-3-Clause"
] | null | null | null | calabiyau/cmd/radius.py | TachyonicProject/calabiyau | 415a8ada4a93ee84c4776e89c9442af328dcfdd6 | [
"BSD-3-Clause"
] | 8 | 2019-06-06T11:01:48.000Z | 2019-06-06T12:18:03.000Z | calabiyau/cmd/radius.py | TachyonicProject/calabiyau | 415a8ada4a93ee84c4776e89c9442af328dcfdd6 | [
"BSD-3-Clause"
] | 3 | 2019-03-28T07:36:22.000Z | 2019-12-27T12:10:14.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from hashlib import md5
from time import sleep
from multiprocessing import (cpu_count,
current_process)
from luxon import g
from luxon import register
from luxon import GetLogger
from luxon import db, dbw
from luxon import MBClient
from luxon.utils.timezone import utc
from luxon.utils.multiproc import ProcessManager
from luxon.utils.multithread import ThreadManager
from luxon.utils.encoding import if_unicode_to_bytes
from calabiyau.core.helpers.radius import (get_user,
get_attributes,
has_session,
get_ip,
update_ip,
get_pool_name,
encode_packet)
from calabiyau.core.handlers.radius.server import Server
from calabiyau.constants import RAD_ACCESSACCEPT
from calabiyau.core.utils.radius import (validate_chap_password,
duplicate)
from calabiyau.lib.ctx import ctx as ctx_values
log = GetLogger(__name__)
clients_hash = b''
@register.resource('service', 'radius')
| 39.675439 | 79 | 0.46422 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from hashlib import md5
from time import sleep
from multiprocessing import (cpu_count,
current_process)
from luxon import g
from luxon import register
from luxon import GetLogger
from luxon import db, dbw
from luxon import MBClient
from luxon.utils.timezone import utc
from luxon.utils.multiproc import ProcessManager
from luxon.utils.multithread import ThreadManager
from luxon.utils.encoding import if_unicode_to_bytes
from calabiyau.core.helpers.radius import (get_user,
get_attributes,
has_session,
get_ip,
update_ip,
get_pool_name,
encode_packet)
from calabiyau.core.handlers.radius.server import Server
from calabiyau.constants import RAD_ACCESSACCEPT
from calabiyau.core.utils.radius import (validate_chap_password,
duplicate)
from calabiyau.lib.ctx import ctx as ctx_values
log = GetLogger(__name__)
def usage(crsr, user):
# Return Values
# 0 All good.
# 1 Deactivate Subscriber
user_id = user['id']
utc_datetime = datetime.utcnow()
if user['package_span'] and user['package_span'] > 0:
if (user['package_expire'] and
utc(utc_datetime) > utc(user['package_expire'])):
log.warning('Package expired (%s)'
% user['username'])
return 1
if user:
# IF DATA PLAN NOT UNCAPPED
if user['plan'] == 'data':
volume_used = user['volume_used']
volume_used_bytes = user['volume_used_bytes']
######################
# CHECK PACKAGE DATA #
######################
package_volume_bytes = user['volume_gb'] * 1024 * 1024 * 1024
if utc(user['volume_expire']) < utc(utc_datetime):
if user['volume_repeat']:
return 0
else:
log.warning('Package data expired (%s)'
% user['username'])
if (not volume_used and
volume_used_bytes > package_volume_bytes):
log.warning('Package data depleted (%s)'
% user['username'])
elif (not volume_used and
volume_used_bytes <= package_volume_bytes):
return 0
####################
# CHECK TOPUP DATA #
####################
crsr.execute('SELECT * FROM calabiyau_topup' +
' WHERE user_id = %s' +
' ORDER BY creation_time asc' +
' FOR UPDATE',
(user_id,))
topups = crsr.fetchall()
for topup in topups:
if topup['volume_gb']:
topup_volume_bytes = (topup['volume_gb'] * 1024 *
1024 * 1024)
else:
topup_volume_bytes = 0
if utc(topup['volume_expire']) < utc(utc_datetime):
if topup['volume_repeat']:
log.warning('Topup renew (%s, %s, %s Gb, %s)' %
(user['username'],
topup['id'],
topup['volume_gb'],
topup['creation_time'],))
db.commit()
return 0
else:
log.warning('Topup expired (%s, %s, %s Gb, %s)' %
(user['username'],
topup['id'],
topup['volume_gb'],
topup['creation_time'],))
else:
if volume_used_bytes < topup_volume_bytes:
return 0
else:
log.warning('Topup depleted (%s, %s, %s Gb, %s)' %
(user['username'],
topup['id'],
topup['volume_gb'],
topup['creation_time'],))
return 1
else:
return 0
class RadiusServer(Server):
__slots__ = ()
def auth(self, pkt, queue, debug):
with db() as dbro:
with dbro.cursor() as crsr:
client = pkt.get('NAS-IP-Address')[0]
user = get_user(crsr,
client,
pkt.source[0],
pkt.get('User-Name')[0])
if user:
if not user['enabled']:
log.warning('Subscriber account disabled (%s)'
% user['username'])
dbro.commit()
return
else:
log.warning('User not found (%s)'
% pkt.get('User-Name')[0])
dbro.commit()
return
if ('User-Password' in pkt):
if pkt['User-Password'][0] != user['password']:
# Check for Legacy MD5 hashed passwords.
hashed = md5(
pkt['User-Password'][0].encode(
'utf-8')).hexdigest()
if str(hashed) != user['password']:
dbro.commit()
log.warning('Password mismatch (%s)'
% user['username'])
return
elif ('CHAP-Password' in pkt and
not validate_chap_password(pkt, user['password'])):
dbro.commit()
log.warning('Password mismatch (%s)'
% user['username'])
return
elif ('User-Password' not in pkt and
'CHAP-Password' not in pkt):
dbro.commit()
log.warning('No password supplied (%s)'
% user['username'])
return
ctx = ctx_values[usage(crsr, user)]
attributes = get_attributes(crsr, user, ctx)
if ctx == 'deactivate-login' and not attributes:
return
if (user['static_ip4'] or
not user['simultaneous']):
if has_session(crsr, user):
log.warning('Subscriber duplicate session (%s)'
% user['username'])
dbro.commit()
return
elif user['static_ip4']:
attributes['Framed-IP-Address'] = user['static_ip4']
elif user['pool_id']:
with dbw() as dbwr:
ip = get_ip(dbwr, user)
if ip:
attributes['Framed-IP-Address'] = ip
else:
pool_name = get_pool_name(crsr, user)
log.critical("IP Pool Empty user '%s' pool '%s'"
% (user['username'],
pool_name,))
dbro.commit()
return
dbro.commit()
return (RAD_ACCESSACCEPT,
attributes)
def acct(self, pkt, queue, debug):
try:
queue.put(pkt, timeout=2)
except Exception:
log.critical('Dropping Accounting Packet' +
' (Queue timeout busy/full)')
return False
return True
def coa(self, pkt, queue, debug):
return False
def pod(self, pkt, queue, debug):
return False
def status(self, pkt, queue, debug):
return True
clients_hash = b''
def conf_manager(srv):
global clients_hash
# add clients (address, secret, name)
while True:
with db() as conn:
with conn.cursor() as crsr:
clients = crsr.execute('SELECT INET6_NTOA(server) as server' +
', secret FROM' +
' calabiyau_nas').fetchall()
string = str(clients).encode('utf-8')
new_hash = md5(string).digest()
if new_hash != clients_hash:
if clients:
for client in clients:
host = client['server']
secret = if_unicode_to_bytes(client['secret'])
srv.add_host(host,
secret)
else:
srv.set_hosts({})
clients_hash = new_hash
crsr.commit()
sleep(10)
def post_processing(queue):
while True:
pkt = queue.get(queue)
with MBClient('subscriber') as mb:
mb.send('radius_accounting',
{'attributes': encode_packet(pkt),
'datetime': str(datetime.utcnow())})
with db() as dbro:
with dbro.cursor() as crsr:
client = pkt.get('NAS-IP-Address')[0]
user = get_user(crsr,
client,
pkt.source[0],
pkt.get('User-Name')[0])
if user:
status = pkt.get('Acct-Status-Type', [''])[0].lower()
if not user['static_ip4'] and user['pool_id']:
with dbw() as dbwr:
update_ip(dbwr, status, user, pkt)
duplicate_to = g.app.config.get('radius',
'duplicate',
fallback=None)
if duplicate_to:
with dbro.cursor() as crsr:
client = pkt.get('NAS-IP-Address')[0]
user = get_user(crsr,
client,
pkt.source[0],
pkt.get('User-Name')[0])
if user:
pkt['Class'] = user['package'].encode('utf-8')
duplicates = duplicate_to.split(',')
for duplicate_to in duplicates:
duplicate_to = duplicate_to.strip()
duplicate(pkt.raw_packet, duplicate_to, 1813)
def post_process(queue):
proc_name = current_process().name
tm = ThreadManager()
for post_thread in range(cpu_count() * 2):
tm.new(post_processing,
'%s-%s' % (proc_name, post_thread+1,),
restart=True,
args=(queue,))
tm.start()
@register.resource('service', 'radius')
def start(req, resp):
try:
pm = ProcessManager()
# create server and read dictionary
srv = RadiusServer(debug=g.app.debug, process_manager=pm)
# Update Process
pm.new(conf_manager, 'ConfManager', args=(srv,))
for post_proc in range(cpu_count()):
pm.new(post_process,
'Post-Process-%s' % post_proc,
args=(srv.queue,))
# start server
srv.start()
pm.start()
except (KeyboardInterrupt, SystemExit):
pass
| 10,433 | 160 | 137 |
f20ed0f92c4fe16cae61792249e76a81c65db911 | 3,995 | py | Python | soda/core/soda/execution/identity.py | duyet/soda-core | 92a52e0d7c1e88624d0637123cfcb2610af6d112 | [
"Apache-2.0"
] | null | null | null | soda/core/soda/execution/identity.py | duyet/soda-core | 92a52e0d7c1e88624d0637123cfcb2610af6d112 | [
"Apache-2.0"
] | null | null | null | soda/core/soda/execution/identity.py | duyet/soda-core | 92a52e0d7c1e88624d0637123cfcb2610af6d112 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from datetime import timedelta
from hashlib import blake2b
from numbers import Number
from typing import Optional
@dataclass
| 35.669643 | 117 | 0.641302 | from dataclasses import dataclass
from datetime import timedelta
from hashlib import blake2b
from numbers import Number
from typing import Optional
class Identity:
@staticmethod
def property(field_name, value):
return IdentityProperty(field_name, value)
@staticmethod
def create_identity(
identity_type: str,
data_source_scan: "DataSourceScan",
partition: "Partition",
column: "Column",
name: Optional[str],
identity_parts: list,
):
parts = [identity_type]
if data_source_scan.scan._scan_definition_name:
parts.append(data_source_scan.scan._scan_definition_name)
parts.append(data_source_scan.data_source.data_source_name)
table = partition.table if partition else None
if table:
parts.append(table.table_name)
if partition and partition.partition_name is not None:
parts.append(partition.partition_name)
if column is not None:
parts.append(column.column_name)
if name:
parts.append(name)
if identity_parts:
hash_builder = ConsistentHashBuilder()
for identity_hash_part in identity_parts:
hash_builder.add(identity_hash_part)
hash = hash_builder.get_hash()
if hash:
parts.append(hash)
return "-".join(parts)
@dataclass
class IdentityProperty:
field_name: str
value: object
class ConsistentHashBuilder:
def __init__(self, hash_string_length: int = 8):
if hash_string_length % 2 != 0:
raise AssertionError(f"hash_string_length must be divisible by 2: {hash_string_length} is not")
self.hash_string_length = hash_string_length
self.blake2b = None
def get_blake2b(self) -> blake2b:
# Lazy initialization of blake2b in order to return None in the self.get_hash(self) in case nothing was added
if self.blake2b is None:
self.blake2b = blake2b(digest_size=int(self.hash_string_length / 2))
return self.blake2b
def add(self, value: Optional[str]):
from soda.sodacl.change_over_time_cfg import ChangeOverTimeCfg
from soda.sodacl.location import Location
from soda.sodacl.missing_and_valid_cfg import MissingAndValidCfg
from soda.sodacl.schema_check_cfg import SchemaValidations
from soda.sodacl.threshold_cfg import ThresholdCfg
if value is None:
return
elif isinstance(value, str):
self.get_blake2b().update(value.encode("utf-8"))
elif isinstance(value, Number) or isinstance(value, bool):
self.get_blake2b().update(str(value).encode("utf-8"))
elif isinstance(value, list) or isinstance(value, dict):
self.add_all(value)
elif isinstance(value, timedelta):
self.add(str(value))
elif isinstance(value, IdentityProperty):
if value.value is not None:
self.add(value.field_name)
self.add(value.value)
elif (
isinstance(value, Location)
or isinstance(value, MissingAndValidCfg)
or isinstance(value, ChangeOverTimeCfg)
or isinstance(value, ThresholdCfg)
or isinstance(value, SchemaValidations)
):
self.add_all(value.get_identity_parts())
else:
raise AssertionError(f"Expected str, number or None, not {value}")
def add_all(self, collection):
if isinstance(collection, list):
for e in collection:
self.add(e)
elif isinstance(collection, dict):
for k, v in collection.items():
self.add(k)
self.add(v)
elif collection is not None:
raise AssertionError(f"Expected list, dict or None, not {collection}")
def get_hash(self) -> str:
return self.blake2b.hexdigest() if self.blake2b else None
| 3,500 | 130 | 202 |
079686c628b26ba9fb16a068b7e7381045bb8fcd | 1,155 | py | Python | tests/test_get_rides.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | null | null | null | tests/test_get_rides.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | 5 | 2019-10-21T17:05:46.000Z | 2021-06-01T22:35:47.000Z | tests/test_get_rides.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | 1 | 2018-09-04T14:17:43.000Z | 2018-09-04T14:17:43.000Z | import sys,os
from tests.Base_test import BaseTest
import json
| 44.423077 | 91 | 0.660606 | import sys,os
from tests.Base_test import BaseTest
import json
class Test_get_rides(BaseTest):
def test_get_all_rides(self):
"""Test if user can be able to fetch all the available rides"""
resp = self.client().post('/api/v1/rides', data=json.dumps(self.test_ride),
content_type='application/json',headers=self.user_header)
self.assertEqual(resp.status_code, 201)
response = self.client().get('/api/v1/rides',headers=self.user_header)
self.assertEqual(response.status_code, 200)
def test_get_single_ride(self):
"""Tests that user can fetch a single ride"""
resp = self.client().post('/api/v1/rides', data=json.dumps(self.test_ride),
content_type='application/json',headers=self.user_header)
self.assertEqual(resp.status_code, 201)
response = self.client().get('/api/v1/rides/1',headers=self.user_header)
self.assertEqual(response.status_code, 200)
def test_get_non_existing(self):
response = self.client().get('/api/v1/rides/1588')
self.assertEqual(response.status_code, 404)
| 122 | 947 | 22 |
825d673fc6501b726f964c2e4ec41965ef80450a | 13,242 | py | Python | common/kvstore/consul_client.py | jeffvan-netsia/voltha_doc | 8af3c0e9348142ca07e849db8ce494ce66ea15f6 | [
"Apache-2.0"
] | null | null | null | common/kvstore/consul_client.py | jeffvan-netsia/voltha_doc | 8af3c0e9348142ca07e849db8ce494ce66ea15f6 | [
"Apache-2.0"
] | 3 | 2021-03-31T18:55:31.000Z | 2022-02-11T03:40:15.000Z | common/kvstore/consul_client.py | netsia/voltha_doc | 8af3c0e9348142ca07e849db8ce494ce66ea15f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
from common.utils.asleep import asleep
from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
from consul import ConsulException
from consul.twisted import Consul
from structlog import get_logger
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
log = get_logger()
| 38.271676 | 97 | 0.574838 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
from common.utils.asleep import asleep
from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
from consul import ConsulException
from consul.twisted import Consul
from structlog import get_logger
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
log = get_logger()
class ConsulClient(KVClient):
def __init__(self, kv_host, kv_port):
KVClient.__init__(self, kv_host, kv_port)
self.session_id = None
self.client = Consul(kv_host, kv_port)
self.watcher = None
@inlineCallbacks
def get(self, key, timeout=DEFAULT_TIMEOUT):
result = yield self._op_with_retry('GET', key, None, timeout)
returnValue(result)
@inlineCallbacks
def list(self, key, timeout=DEFAULT_TIMEOUT):
result = yield self._op_with_retry('LIST', key, None, timeout)
returnValue(result)
@inlineCallbacks
def put(self, key, value, timeout=DEFAULT_TIMEOUT):
_, err = yield self._op_with_retry('PUT', key, value, timeout)
returnValue(err)
@inlineCallbacks
def delete(self, key, timeout=DEFAULT_TIMEOUT):
_, err = yield self._op_with_retry('DELETE', key, None, timeout)
returnValue(err)
@inlineCallbacks
def reserve(self, key, value, ttl, timeout=DEFAULT_TIMEOUT):
result = yield self._op_with_retry('RESERVE', key, value, timeout, ttl=ttl)
returnValue(result)
@inlineCallbacks
def renew_reservation(self, key, timeout=DEFAULT_TIMEOUT):
result, err = yield self._op_with_retry('RENEW', key, None, timeout)
returnValue(err)
@inlineCallbacks
def release_reservation(self, key, timeout=DEFAULT_TIMEOUT):
result, err = yield self._op_with_retry('RELEASE', key, None, timeout)
returnValue(err)
@inlineCallbacks
def release_all_reservations(self, timeout=DEFAULT_TIMEOUT):
result, err = yield self._op_with_retry('RELEASE-ALL', None, None, timeout)
returnValue(err)
def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
self._retriggering_watch(key, key_change_callback, timeout)
@inlineCallbacks
def _retriggering_watch(self, key, key_change_callback, timeout):
self.key_watches[key] = ConsulWatch(self.client, key, key_change_callback, timeout)
yield self.key_watches[key].start()
def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
if key in self.key_watches:
self.key_watches[key].stop()
@inlineCallbacks
def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
log.debug('kv-op', operation=operation, key=key, timeout=timeout, args=args, kw=kw)
err = None
result = None
while True:
try:
if operation == 'GET':
result = yield self._get(key, **kw)
elif operation == 'LIST':
result, err = yield self._list(key)
elif operation == 'PUT':
# Put returns a boolean response
result = yield self.client.kv.put(key, value)
if not result:
err = 'put-failed'
elif operation == 'DELETE':
# Delete returns a boolean response
result = yield self.client.kv.delete(key)
if not result:
err = 'delete-failed'
elif operation == 'RESERVE':
result, err = yield self._reserve(key, value, **kw)
elif operation == 'RENEW':
result, err = yield self._renew_reservation(key)
elif operation == 'RELEASE':
result, err = yield self._release_reservation(key)
elif operation == 'RELEASE-ALL':
err = yield self._release_all_reservations()
self._clear_backoff()
break
except ConsulException as ex:
if 'ConnectionRefusedError' in ex.message:
log.exception('comms-exception', ex=ex)
yield self._backoff('consul-not-up')
else:
log.error('consul-specific-exception', ex=ex)
err = ex
except Exception as ex:
log.error('consul-exception', ex=ex)
err = ex
if timeout > 0 and self.retry_time > timeout:
err = 'operation-timed-out'
if err is not None:
self._clear_backoff()
break
returnValue((result,err))
@inlineCallbacks
def _get(self, key, **kw):
kvp = None
index, rec = yield self.client.kv.get(key, **kw)
if rec is not None:
kvp = KVPair(rec['Key'], rec['Value'], index)
returnValue(kvp)
@inlineCallbacks
def _list(self, key):
err = None
list = []
index, recs = yield self.client.kv.get(key, recurse=True)
for rec in recs:
list.append(KVPair(rec['Key'], rec['Value'], rec['ModifyIndex']))
returnValue((list, err))
@inlineCallbacks
def _reserve(self, key, value, **kw):
    """Try to reserve *key* for *value* using a Consul session.

    Expects a 'ttl' keyword argument (session time-to-live, seconds).
    Fires with (owner, err): owner is the value currently holding the
    key (ours on success, the competitor's otherwise); err is None only
    when the reservation succeeded.
    """
    # Pull the ttl out of the keyword arguments.
    # NOTE(review): if no 'ttl' kwarg is supplied, 'ttl' is never bound
    # and the session.create() call below raises NameError.
    for name, val in kw.items():
        if name == 'ttl':
            ttl = val
            break
    reserved = False
    err = 'reservation-failed'
    owner = None
    # Create a session
    # behavior='delete' makes Consul remove the key when the session
    # expires or is destroyed.
    self.session_id = yield self.client.session.create(behavior='delete',
                                                       ttl=ttl)  # lock_delay=1)
    log.debug('create-session', id=self.session_id)
    # Try to acquire the key
    result = yield self.client.kv.put(key, value, acquire=self.session_id)
    log.debug('key-acquire', key=key, value=value, sess=self.session_id, result=result)
    # Check if reservation succeeded
    index, record = yield self.client.kv.get(key)
    if record is not None and 'Value' in record:
        owner = record['Value']
        log.debug('get-key', session=record['Session'], owner=owner)
        if record['Session'] == self.session_id and owner == value:
            reserved = True
            log.debug('key-reserved', key=key, value=value, ttl=ttl)
            # Add key to reservation list
            self.key_reservations[key] = self.session_id
        else:
            log.debug('reservation-held-by-another', owner=owner)
    if reserved:
        err = None
    returnValue((owner, err))
@inlineCallbacks
def _renew_reservation(self, key):
    """Renew the Consul session backing an existing reservation of *key*;
    fires with (session_info, err)."""
    if key not in self.key_reservations:
        returnValue((None, 'key-not-reserved'))
    session_id = self.key_reservations[key]
    # A successfully renewed session returns an object with fields:
    # Node, CreateIndex, Name, ModifyIndex, ID, Behavior, TTL,
    # LockDelay, and Checks
    renewed = yield self.client.session.renew(session_id=session_id)
    log.debug('session-renew', result=renewed)
    err = None if renewed is not None else 'session-renewal-failed'
    returnValue((renewed, err))
@inlineCallbacks
def _release_reservation(self, key):
    """Release a reservation of *key* by destroying its Consul session.

    Fires with (success, err): success is the boolean result of the
    session destroy (None when the key was never reserved); err is None
    on success.
    """
    err = None
    # Bug fix: 'success' was previously assigned only in the else branch,
    # so releasing an unreserved key raised NameError at returnValue().
    success = None
    if key not in self.key_reservations:
        err = 'key-not-reserved'
    else:
        session_id = self.key_reservations[key]
        # A successfully destroyed session returns a boolean result
        success = yield self.client.session.destroy(session_id)
        log.debug('session-destroy', result=success)
        if not success:
            err = 'session-destroy-failed'
        # The reservation is forgotten even if the destroy failed; the
        # session will eventually expire on the Consul side.
        self.session_id = None
        self.key_reservations.pop(key)
    returnValue((success, err))
@inlineCallbacks
def _release_all_reservations(self):
    """Destroy the session behind every tracked reservation.

    Fires with None on success or 'session-destroy-failed' when any
    destroy failed.
    """
    err = None
    # Iterate over a snapshot so entries can be removed from the
    # tracking dict as we go.
    for key, session_id in list(self.key_reservations.items()):
        # A successfully destroyed session returns a boolean result
        success = yield self.client.session.destroy(session_id)
        if not success:
            err = 'session-destroy-failed'
        log.debug('session-destroy', id=session_id, result=success)
        self.session_id = None
        self.key_reservations.pop(key)
    returnValue(err)
class ConsulWatch():
    """Long-poll watcher for a single Consul key.

    Repeatedly issues blocking kv.get queries against the key's last
    known ModifyIndex and reports changes to *callback* as Event
    notifications (PUT / DELETE / CONNECTION_DOWN).
    """
    def __init__(self, consul, key, callback, timeout):
        self.client = consul
        self.key = key
        self.index = None        # last ModifyIndex seen, kept as a string
        self.callback = callback
        self.timeout = timeout   # total retry budget per get, in seconds
        self.period = 60         # max wait for one watch iteration, seconds
        self.running = True
        self.retries = 0         # consecutive failed attempts (for backoff)
        self.retry_time = 0      # accumulated backoff delay, in seconds
    @inlineCallbacks
    def start(self):
        """Run the watch loop until stop() is called.

        First primes self.index with the key's current index, then
        repeatedly long-polls Consul (each iteration bounded by
        self.period via DeferredWithTimeout) and forwards change events
        to the registered callback.
        """
        self.running = True
        # Prime the watch so subsequent blocking queries only fire on
        # changes after this point.
        index, rec = yield self._get_with_retry(self.key, None,
                                                timeout=self.timeout)
        self.index = str(index)
        @inlineCallbacks
        def _get(key, deferred):
            # One blocking (long-poll) query; fires *deferred* with
            # (index, record) when Consul reports a change.
            try:
                index, rec = yield self._get_with_retry(key, None,
                                                        timeout=self.timeout,
                                                        index=self.index)
                self.index = str(index)
                if not deferred.called:
                    log.debug('got-result-cancelling-deferred')
                    deferred.callback((self.index, rec))
            except Exception as e:
                log.exception('got-exception', e=e)
        while self.running:
            try:
                # Bound each iteration by self.period so the loop can
                # notice stop() even when the key never changes.
                rcvd = DeferredWithTimeout(timeout=self.period)
                _get(self.key, rcvd)
                try:
                    # Update index for next watch iteration
                    index, rec = yield rcvd
                    log.debug('event-received', index=index, rec=rec)
                    # Notify client of key change event
                    if rec is None:
                        # Key has been deleted
                        self._send_event(Event(Event.DELETE, self.key, None))
                    else:
                        self._send_event(Event(Event.PUT, rec['Key'], rec['Value']))
                except TimeOutError as e:
                    log.debug('no-events-over-watch-period', key=self.key)
                except Exception as e:
                    log.exception('exception', e=e)
            except Exception as e:
                log.exception('exception', e=e)
        log.debug('close-watch', key=self.key)
    def stop(self):
        """Stop the watch loop and detach the callback."""
        self.running = False
        self.callback = None
    @inlineCallbacks
    def _get_with_retry(self, key, value, timeout, *args, **kw):
        """kv.get with connection-loss backoff.

        Fires with the raw (index, record) tuple from python-consul.
        While Consul is unreachable, a CONNECTION_DOWN event is sent and
        the request is retried with increasing delays; a timeout > 0
        bounds the total retry time.
        """
        log.debug('watch-period', key=key, period=self.period, timeout=timeout, args=args, kw=kw)
        err = None
        result = None
        while True:
            try:
                result = yield self.client.kv.get(key, **kw)
                self._clear_backoff()
                break
            except ConsulException as ex:
                err = ex
                if 'ConnectionRefusedError' in ex.message:
                    # Consul itself is down: tell the client, back off,
                    # then retry.
                    self._send_event(Event(Event.CONNECTION_DOWN, self.key, None))
                    log.exception('comms-exception', ex=ex)
                    yield self._backoff('consul-not-up')
                else:
                    log.error('consul-specific-exception', ex=ex)
            except Exception as ex:
                err = ex
                log.error('consul-exception', ex=ex)
            if timeout > 0 and self.retry_time > timeout:
                err = 'operation-timed-out'
            if err is not None:
                self._clear_backoff()
                break
        returnValue(result)
    def _send_event(self, event):
        # The callback may already have been detached by stop().
        if self.callback is not None:
            self.callback(event)
    def _backoff(self, msg):
        """Sleep for the next RETRY_BACKOFF interval (capped at the last
        entry) and account the delay in retry_time."""
        wait_time = RETRY_BACKOFF[min(self.retries, len(RETRY_BACKOFF) - 1)]
        self.retry_time += wait_time
        self.retries += 1
        log.error(msg, next_retry_in_secs=wait_time,
                  total_delay_in_secs = self.retry_time,
                  retries=self.retries)
        return asleep(wait_time)
    def _clear_backoff(self):
        # Reset retry bookkeeping after a successful request.
        if self.retries:
            log.debug('reconnected-to-kv', after_retries=self.retries)
            self.retries = 0
            self.retry_time = 0
| 11,111 | 1,087 | 46 |
5d1c35c080f79e7222b6c25c75d8f548a8b8d29e | 2,013 | py | Python | axon/tests/unit/apps/test_iperf.py | PradeepSingh1988/validation-app-engine | 81ed23939bc72ecbe188e22effed07e23f6e8b28 | [
"BSD-2-Clause"
] | null | null | null | axon/tests/unit/apps/test_iperf.py | PradeepSingh1988/validation-app-engine | 81ed23939bc72ecbe188e22effed07e23f6e8b28 | [
"BSD-2-Clause"
] | null | null | null | axon/tests/unit/apps/test_iperf.py | PradeepSingh1988/validation-app-engine | 81ed23939bc72ecbe188e22effed07e23f6e8b28 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
'''
Unit test for Iperf app.
'''
import logging
import time
import unittest
import warnings
from axon.apps.iperf import Iperf
log = logging.getLogger(__name__)
| 29.602941 | 86 | 0.624938 | #!/usr/bin/env python
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
'''
Unit test for Iperf app.
'''
import logging
import time
import unittest
import warnings
from axon.apps.iperf import Iperf
log = logging.getLogger(__name__)
class TestIperfApp(unittest.TestCase):
    """Integration test for the Iperf app wrapper.

    NOTE(review): this really starts iperf servers/clients on localhost,
    so it needs the iperf binary installed and the ports free.
    """
    def setUp(self):
        super(TestIperfApp, self).setUp()
        self._app = Iperf()
        # Subprocess pipes opened by iperf would otherwise emit
        # ResourceWarning noise during the run.
        warnings.simplefilter('ignore', category=ResourceWarning)
    def test_iperfApp(self):
        """
        Test iperf server / client / supporting APIs
        """
        _server_ports = [7011, 7012, 7013]
        _client_jobs = []
        _running_ports = []
        _test_duration = 10
        # start iperf server on given port
        for port in _server_ports:
            assert self._app.start_iperf_server(port) == port
        # start iperf server on random port
        assert isinstance(self._app.start_iperf_server(), int)
        _running_ports = self._app.get_server_ports()
        assert _running_ports
        # start iperf clients
        for port in _running_ports:
            _client_jobs.append(self._app.start_iperf_client('localhost', port,
                                                             duration=_test_duration))
        assert _client_jobs
        # Let the clients run to completion before inspecting them.
        time.sleep(_test_duration)
        # check client job info
        for job in _client_jobs:
            job_info = self._app.get_client_job_info(job)
            assert job_info.get('popen_obj')
            assert job_info.get('state') in ('running', 'done')
            if job_info.get('state') == 'done':
                assert job_info.get('result')
            assert job_info.get('cmd')
        # Stop iperf server running on different ports
        for port in self._app.get_server_ports():
            self._app.stop_iperf_server(port)
            assert not self._app.is_running(port)
| 131 | 1,476 | 23 |
da460fa767169a87d353201091283d256418000e | 126 | py | Python | ar/datasets/data_cleaning.py | Learn-Live/activity_recognition | 76fa7bcecc3e422f1ea59fd1aaf576669e1248fb | [
"Apache-2.0"
] | 1 | 2022-01-10T21:02:50.000Z | 2022-01-10T21:02:50.000Z | ar/datasets/data_cleaning.py | Learn-Live/activity_recognition | 76fa7bcecc3e422f1ea59fd1aaf576669e1248fb | [
"Apache-2.0"
] | null | null | null | ar/datasets/data_cleaning.py | Learn-Live/activity_recognition | 76fa7bcecc3e422f1ea59fd1aaf576669e1248fb | [
"Apache-2.0"
] | null | null | null | """
1. duplicated values
2. irrelevant data
3. structural errors (inconsistent labels, e.g. 'female' vs. 'fem')
4. missing values
5. outliers
"""
| 9.692308 | 38 | 0.634921 | """
1. duplicated values
2. irrelevant data
3. structural error ('female', 'fem')
4. missing values
5. outliers
"""
| 0 | 0 | 0 |
ce23ad8bd747db9994e49043d2cfeb6b66f9ee83 | 6,248 | py | Python | C5-Applied_Social_Network_Analysis_in_Python/Week_1/Loading+Graphs+in+NetworkX.py | urbanclimatefr/Coursera-Applied-Data-Science-with-Python | 85a74505c97849dc1bb27c139f1281831362b15d | [
"MIT"
] | null | null | null | C5-Applied_Social_Network_Analysis_in_Python/Week_1/Loading+Graphs+in+NetworkX.py | urbanclimatefr/Coursera-Applied-Data-Science-with-Python | 85a74505c97849dc1bb27c139f1281831362b15d | [
"MIT"
] | null | null | null | C5-Applied_Social_Network_Analysis_in_Python/Week_1/Loading+Graphs+in+NetworkX.py | urbanclimatefr/Coursera-Applied-Data-Science-with-Python | 85a74505c97849dc1bb27c139f1281831362b15d | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Loading Graphs in NetworkX
# In[1]:
import networkx as nx
import numpy as np
import pandas as pd
get_ipython().magic('matplotlib notebook')
# Instantiate the graph
G1 = nx.Graph()
# add node/edge pairs
G1.add_edges_from([(0, 1),
(0, 2),
(0, 3),
(0, 5),
(1, 3),
(1, 6),
(3, 4),
(4, 5),
(4, 7),
(5, 8),
(8, 9)])
# draw the network G1
nx.draw_networkx(G1)
# ### Adjacency List
# `G_adjlist.txt` is the adjacency list representation of G1.
#
# It can be read as follows:
# * `0 1 2 3 5` $\rightarrow$ node `0` is adjacent to nodes `1, 2, 3, 5`
# * `1 3 6` $\rightarrow$ node `1` is (also) adjacent to nodes `3, 6`
# * `2` $\rightarrow$ node `2` is (also) adjacent to no new nodes
# * `3 4` $\rightarrow$ node `3` is (also) adjacent to node `4`
#
# and so on. Note that adjacencies are only accounted for once (e.g. node `2` is adjacent to node `0`, but node `0` is not listed in node `2`'s row, because that edge has already been accounted for in node `0`'s row).
# In[2]:
get_ipython().system('cat G_adjlist.txt')
# If we read in the adjacency list using `nx.read_adjlist`, we can see that it matches `G1`.
# In[3]:
G2 = nx.read_adjlist('G_adjlist.txt', nodetype=int)
G2.edges()
# ### Adjacency Matrix
#
# The elements in an adjacency matrix indicate whether pairs of vertices are adjacent or not in the graph. Each node has a corresponding row and column. For example, row `0`, column `1` corresponds to the edge between node `0` and node `1`.
#
# Reading across row `0`, there is a '`1`' in columns `1`, `2`, `3`, and `5`, which indicates that node `0` is adjacent to nodes 1, 2, 3, and 5
# In[4]:
G_mat = np.array([[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]])
G_mat
# If we convert the adjacency matrix to a networkx graph using `nx.Graph`, we can see that it matches G1.
# In[5]:
G3 = nx.Graph(G_mat)
G3.edges()
# ### Edgelist
# The edge list format represents edge pairings in the first two columns. Additional edge attributes can be added in subsequent columns. Looking at `G_edgelist.txt` this is the same as the original graph `G1`, but now each edge has a weight.
#
# For example, from the first row, we can see the edge between nodes `0` and `1`, has a weight of `4`.
# In[6]:
get_ipython().system('cat G_edgelist.txt')
# Using `read_edgelist` and passing in a list of tuples with the name and type of each edge attribute will create a graph with our desired edge attributes.
# In[7]:
G4 = nx.read_edgelist('G_edgelist.txt', data=[('Weight', int)])
G4.edges(data=True)
# ### Pandas DataFrame
# Graphs can also be created from pandas dataframes if they are in edge list format.
# In[8]:
G_df = pd.read_csv('G_edgelist.txt', delim_whitespace=True,
header=None, names=['n1', 'n2', 'weight'])
G_df
# In[9]:
G5 = nx.from_pandas_dataframe(G_df, 'n1', 'n2', edge_attr='weight')
G5.edges(data=True)
# ### Chess Example
# Now let's load in a more complex graph and perform some basic analysis on it.
#
# We will be looking at chess_graph.txt, which is a directed graph of chess games in edge list format.
# In[10]:
get_ipython().system('head -5 chess_graph.txt')
# Each node is a chess player, and each edge represents a game. The first column with an outgoing edge corresponds to the white player, the second column with an incoming edge corresponds to the black player.
#
# The third column, the weight of the edge, corresponds to the outcome of the game. A weight of 1 indicates white won, a 0 indicates a draw, and a -1 indicates black won.
#
# The fourth column corresponds to approximate timestamps of when the game was played.
#
# We can read in the chess graph using `read_edgelist`, and tell it to create the graph using a `nx.MultiDiGraph`.
# In[11]:
chess = nx.read_edgelist('chess_graph.txt', data=[('outcome', int), ('timestamp', float)],
create_using=nx.MultiDiGraph())
# In[12]:
chess.is_directed(), chess.is_multigraph()
# In[13]:
chess.edges(data=True)
# Looking at the degree of each node, we can see how many games each person played. A dictionary is returned where each key is the player, and each value is the number of games played.
# In[14]:
games_played = chess.degree()
games_played
# Using list comprehension, we can find which player played the most games.
# In[15]:
max_value = max(games_played.values())
max_key, = [i for i in games_played.keys() if games_played[i] == max_value]
print('player {}\n{} games'.format(max_key, max_value))
# Let's use pandas to find out which players won the most games. First let's convert our graph to a DataFrame.
# In[16]:
df = pd.DataFrame(chess.edges(data=True), columns=['white', 'black', 'outcome'])
df.head()
# Next we can use a lambda to pull out the outcome from the attributes dictionary.
# In[17]:
df['outcome'] = df['outcome'].map(lambda x: x['outcome'])
df.head()
# To count the number of times a player won as white, we find the rows where the outcome was '1', group by the white player, and sum.
#
# To count the number of times a player won as back, we find the rows where the outcome was '-1', group by the black player, sum, and multiply by -1.
#
# The we can add these together with a fill value of 0 for those players that only played as either black or white.
# In[18]:
won_as_white = df[df['outcome']==1].groupby('white').sum()
won_as_black = -df[df['outcome']==-1].groupby('black').sum()
win_count = won_as_white.add(won_as_black, fill_value=0)
win_count.head()
# Using `nlargest` we find that player 330 won the most games at 109.
# In[19]:
win_count.nlargest(5, 'outcome')
| 29.471698 | 242 | 0.634443 |
# coding: utf-8
# # Loading Graphs in NetworkX
# In[1]:
import networkx as nx
import numpy as np
import pandas as pd
get_ipython().magic('matplotlib notebook')
# Instantiate the graph
G1 = nx.Graph()
# add node/edge pairs
G1.add_edges_from([(0, 1),
(0, 2),
(0, 3),
(0, 5),
(1, 3),
(1, 6),
(3, 4),
(4, 5),
(4, 7),
(5, 8),
(8, 9)])
# draw the network G1
nx.draw_networkx(G1)
# ### Adjacency List
# `G_adjlist.txt` is the adjacency list representation of G1.
#
# It can be read as follows:
# * `0 1 2 3 5` $\rightarrow$ node `0` is adjacent to nodes `1, 2, 3, 5`
# * `1 3 6` $\rightarrow$ node `1` is (also) adjacent to nodes `3, 6`
# * `2` $\rightarrow$ node `2` is (also) adjacent to no new nodes
# * `3 4` $\rightarrow$ node `3` is (also) adjacent to node `4`
#
# and so on. Note that adjacencies are only accounted for once (e.g. node `2` is adjacent to node `0`, but node `0` is not listed in node `2`'s row, because that edge has already been accounted for in node `0`'s row).
# In[2]:
get_ipython().system('cat G_adjlist.txt')
# If we read in the adjacency list using `nx.read_adjlist`, we can see that it matches `G1`.
# In[3]:
G2 = nx.read_adjlist('G_adjlist.txt', nodetype=int)
G2.edges()
# ### Adjacency Matrix
#
# The elements in an adjacency matrix indicate whether pairs of vertices are adjacent or not in the graph. Each node has a corresponding row and column. For example, row `0`, column `1` corresponds to the edge between node `0` and node `1`.
#
# Reading across row `0`, there is a '`1`' in columns `1`, `2`, `3`, and `5`, which indicates that node `0` is adjacent to nodes 1, 2, 3, and 5
# In[4]:
G_mat = np.array([[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]])
G_mat
# If we convert the adjacency matrix to a networkx graph using `nx.Graph`, we can see that it matches G1.
# In[5]:
G3 = nx.Graph(G_mat)
G3.edges()
# ### Edgelist
# The edge list format represents edge pairings in the first two columns. Additional edge attributes can be added in subsequent columns. Looking at `G_edgelist.txt` this is the same as the original graph `G1`, but now each edge has a weight.
#
# For example, from the first row, we can see the edge between nodes `0` and `1`, has a weight of `4`.
# In[6]:
get_ipython().system('cat G_edgelist.txt')
# Using `read_edgelist` and passing in a list of tuples with the name and type of each edge attribute will create a graph with our desired edge attributes.
# In[7]:
G4 = nx.read_edgelist('G_edgelist.txt', data=[('Weight', int)])
G4.edges(data=True)
# ### Pandas DataFrame
# Graphs can also be created from pandas dataframes if they are in edge list format.
# In[8]:
G_df = pd.read_csv('G_edgelist.txt', delim_whitespace=True,
header=None, names=['n1', 'n2', 'weight'])
G_df
# In[9]:
G5 = nx.from_pandas_dataframe(G_df, 'n1', 'n2', edge_attr='weight')
G5.edges(data=True)
# ### Chess Example
# Now let's load in a more complex graph and perform some basic analysis on it.
#
# We will be looking at chess_graph.txt, which is a directed graph of chess games in edge list format.
# In[10]:
get_ipython().system('head -5 chess_graph.txt')
# Each node is a chess player, and each edge represents a game. The first column with an outgoing edge corresponds to the white player, the second column with an incoming edge corresponds to the black player.
#
# The third column, the weight of the edge, corresponds to the outcome of the game. A weight of 1 indicates white won, a 0 indicates a draw, and a -1 indicates black won.
#
# The fourth column corresponds to approximate timestamps of when the game was played.
#
# We can read in the chess graph using `read_edgelist`, and tell it to create the graph using a `nx.MultiDiGraph`.
# In[11]:
chess = nx.read_edgelist('chess_graph.txt', data=[('outcome', int), ('timestamp', float)],
create_using=nx.MultiDiGraph())
# In[12]:
chess.is_directed(), chess.is_multigraph()
# In[13]:
chess.edges(data=True)
# Looking at the degree of each node, we can see how many games each person played. A dictionary is returned where each key is the player, and each value is the number of games played.
# In[14]:
games_played = chess.degree()
games_played
# Using list comprehension, we can find which player played the most games.
# In[15]:
max_value = max(games_played.values())
max_key, = [i for i in games_played.keys() if games_played[i] == max_value]
print('player {}\n{} games'.format(max_key, max_value))
# Let's use pandas to find out which players won the most games. First let's convert our graph to a DataFrame.
# In[16]:
df = pd.DataFrame(chess.edges(data=True), columns=['white', 'black', 'outcome'])
df.head()
# Next we can use a lambda to pull out the outcome from the attributes dictionary.
# In[17]:
df['outcome'] = df['outcome'].map(lambda x: x['outcome'])
df.head()
# To count the number of times a player won as white, we find the rows where the outcome was '1', group by the white player, and sum.
#
# To count the number of times a player won as back, we find the rows where the outcome was '-1', group by the black player, sum, and multiply by -1.
#
# The we can add these together with a fill value of 0 for those players that only played as either black or white.
# In[18]:
won_as_white = df[df['outcome']==1].groupby('white').sum()
won_as_black = -df[df['outcome']==-1].groupby('black').sum()
win_count = won_as_white.add(won_as_black, fill_value=0)
win_count.head()
# Using `nlargest` we find that player 330 won the most games at 109.
# In[19]:
win_count.nlargest(5, 'outcome')
| 0 | 0 | 0 |
2abb563319d5226456db00bd71af55dacad1ee8c | 717 | py | Python | lunch_5/assignment_solutions/track_1.py | Develawpers/monday_coding_lunches | a93920b9cfb0b0d23c0f211d54d1997aa8851b94 | [
"MIT"
] | 1 | 2022-01-17T14:44:41.000Z | 2022-01-17T14:44:41.000Z | lunch_5/assignment_solutions/track_1.py | Develawpers/monday_coding_lunches | a93920b9cfb0b0d23c0f211d54d1997aa8851b94 | [
"MIT"
] | null | null | null | lunch_5/assignment_solutions/track_1.py | Develawpers/monday_coding_lunches | a93920b9cfb0b0d23c0f211d54d1997aa8851b94 | [
"MIT"
] | 3 | 2022-01-31T10:26:52.000Z | 2022-03-06T23:41:13.000Z | from datetime import datetime
if __name__ == "__main__":
age = ask_age()
dob = ask_dob()
age_in_days = calc_age_in_days(dob)
declared_age_in_days = age * 365 # not super precise, but we don't care for now
treshold = 180 # let's set a lying treshold at 6 months
if abs(declared_age_in_days - age_in_days) > treshold:
print("You're lying!")
else:
print("All good")
| 23.129032 | 84 | 0.638773 | from datetime import datetime
def ask_age():
    """Prompt the user for their age and return it as an int."""
    return int(input("What's your age? "))
def ask_dob():
    """Prompt for a date of birth (dd/mm/yyyy) and return it as a
    datetime object."""
    raw = input("What's your date of birth? (dd/mm/yyyy) ")
    return datetime.strptime(raw, "%d/%m/%Y")
def calc_age_in_days(_dob):
    """Return the whole number of days elapsed since *_dob*."""
    return (datetime.now() - _dob).days
if __name__ == "__main__":
    # Cross-check the declared age against the date of birth and call
    # out discrepancies larger than the threshold.
    age = ask_age()
    dob = ask_dob()
    age_in_days = calc_age_in_days(dob)
    declared_age_in_days = age * 365 # not super precise, but we don't care for now
    treshold = 180 # let's set a lying treshold at 6 months
    if abs(declared_age_in_days - age_in_days) > treshold:
        print("You're lying!")
    else:
        print("All good")
| 233 | 0 | 69 |
607f4e365fcef4ce31776604160d9b61ceb3db26 | 1,774 | py | Python | helpers/fpga-bit-to-bin.py | pjsg/red-pitaya-notes | 85faf2ed717c1bb08d350376fe4b366fdc5c56a6 | [
"MIT"
] | 240 | 2015-02-28T19:14:08.000Z | 2022-03-26T13:52:30.000Z | helpers/fpga-bit-to-bin.py | pjsg/red-pitaya-notes | 85faf2ed717c1bb08d350376fe4b366fdc5c56a6 | [
"MIT"
] | 402 | 2015-05-08T11:51:32.000Z | 2022-03-22T21:28:43.000Z | helpers/fpga-bit-to-bin.py | pjsg/red-pitaya-notes | 85faf2ed717c1bb08d350376fe4b366fdc5c56a6 | [
"MIT"
] | 179 | 2015-01-08T22:53:02.000Z | 2022-03-21T16:42:18.000Z | #!/usr/bin/python
# copied from https://github.com/topic-embedded-products/meta-topic/blob/master/recipes-bsp/fpga/fpga-bit-to-bin/fpga-bit-to-bin.py
import sys
import os
import struct
import argparse
parser = argparse.ArgumentParser(description='Convert FPGA bit files to raw bin format suitable for flashing')
parser.add_argument('-f', '--flip', dest='flip', action='store_true', default=False, help='Flip 32-bit endianess (needed for Zynq)')
parser.add_argument("bitfile", help="Input bit file name")
parser.add_argument("binfile", help="Output bin file name")
args = parser.parse_args()
short = struct.Struct('>H')
ulong = struct.Struct('>I')
bitfile = open(args.bitfile, 'rb')
l = short.unpack(bitfile.read(2))[0]
if l != 9:
raise Exception, "Missing <0009> header (0x%x), not a bit file" % l
bitfile.read(l)
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
if d != 'a':
raise Exception, "Missing <a> header, not a bit file"
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print "Design name:", d
KEYNAMES = {'b': "Partname", 'c': "Date", 'd': "Time"}
while 1:
k = bitfile.read(1)
if not k:
raise Exception, "unexpected EOF"
elif k == 'e':
l = ulong.unpack(bitfile.read(4))[0]
print "found binary data:", l
d = bitfile.read(l)
if args.flip:
d = flip32(d)
open(args.binfile, 'wb').write(d)
break
elif k in KEYNAMES:
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print KEYNAMES[k], d
else:
print "Unexpected key: ", k
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
| 26.878788 | 132 | 0.675874 | #!/usr/bin/python
# copied from https://github.com/topic-embedded-products/meta-topic/blob/master/recipes-bsp/fpga/fpga-bit-to-bin/fpga-bit-to-bin.py
import sys
import os
import struct
def flip32(data):
    """Return *data* with the byte order of every 32-bit word reversed
    (.bit payloads are big-endian; the Zynq boot flash expects
    little-endian words).

    NOTE: Python 2 code -- relies on buffer() and xrange().
    """
    sl = struct.Struct('<I')
    sb = struct.Struct('>I')
    b = buffer(data)
    d = bytearray(len(data))
    for offset in xrange(0, len(data), 4):
        # Read each word little-endian, re-pack it big-endian: the net
        # effect is a per-word endianness flip.
        sb.pack_into(d, offset, sl.unpack_from(b, offset)[0])
    return d
import argparse
# Command line: fpga-bit-to-bin.py [-f] input.bit output.bin
parser = argparse.ArgumentParser(description='Convert FPGA bit files to raw bin format suitable for flashing')
parser.add_argument('-f', '--flip', dest='flip', action='store_true', default=False, help='Flip 32-bit endianess (needed for Zynq)')
parser.add_argument("bitfile", help="Input bit file name")
parser.add_argument("binfile", help="Output bin file name")
args = parser.parse_args()
# Big-endian readers for the length-prefixed records in the .bit header.
short = struct.Struct('>H')
ulong = struct.Struct('>I')
bitfile = open(args.bitfile, 'rb')
# A .bit file starts with a 16-bit length (always 9) and a fixed 9-byte
# preamble, followed by a sequence of keyed records.
l = short.unpack(bitfile.read(2))[0]
if l != 9:
    raise Exception, "Missing <0009> header (0x%x), not a bit file" % l
bitfile.read(l)
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
if d != 'a':
    raise Exception, "Missing <a> header, not a bit file"
# Record 'a' carries the design name.
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print "Design name:", d
KEYNAMES = {'b': "Partname", 'c': "Date", 'd': "Time"}
# Walk the remaining records: keys b/c/d are length-prefixed metadata
# strings; key 'e' carries the raw configuration bitstream payload.
while 1:
    k = bitfile.read(1)
    if not k:
        raise Exception, "unexpected EOF"
    elif k == 'e':
        l = ulong.unpack(bitfile.read(4))[0]
        print "found binary data:", l
        d = bitfile.read(l)
        if args.flip:
            # Zynq wants the words little-endian.
            d = flip32(d)
        open(args.binfile, 'wb').write(d)
        break
    elif k in KEYNAMES:
        l = short.unpack(bitfile.read(2))[0]
        d = bitfile.read(l)
        print KEYNAMES[k], d
    else:
        print "Unexpected key: ", k
        l = short.unpack(bitfile.read(2))[0]
        d = bitfile.read(l)
f83273f6f1fc32348224782533b700ca5e9179fc | 806 | py | Python | tests/test_database.py | zesk06/pykscores | 3148b3ec023a2b526685255d01896991415c6730 | [
"Apache-2.0"
] | null | null | null | tests/test_database.py | zesk06/pykscores | 3148b3ec023a2b526685255d01896991415c6730 | [
"Apache-2.0"
] | null | null | null | tests/test_database.py | zesk06/pykscores | 3148b3ec023a2b526685255d01896991415c6730 | [
"Apache-2.0"
] | null | null | null | # pylint fail to discover some mongoengin members
# pylint: disable=E1101
import pykscores.database as db
from pykscores.database import User, Play, UserScore
import datetime
db.connect()
| 24.424242 | 54 | 0.668734 | # pylint fail to discover some mongoengin members
# pylint: disable=E1101
import pykscores.database as db
from pykscores.database import User, Play, UserScore
import datetime
db.connect()
def test_user():
    """Recreate the 'albert' user: delete any existing copies, then save
    a fresh document.

    NOTE(review): requires a live MongoDB reachable via db.connect().
    """
    # try to insert albert
    if User.objects(login='albert'):
        for user in User.objects(login='albert'):
            print('delete previous albert')
            user.delete()
    new_u = User(login='albert')
    new_u.save()
    print('saved new_user')
def test_play():
    """Create a Play with two user scores and persist it.

    NOTE(review): requires a live MongoDB reachable via db.connect().
    """
    new_p = Play()
    new_p.date = datetime.datetime.now()
    # NOTE(review): find_one(..., create=True) appears to be a project
    # helper that inserts the user when missing -- confirm in
    # pykscores.database.
    user1 = User.find_one(login='albert', create=True)
    user2 = User.find_one(login='gerard', create=True)
    score1 = UserScore(user=user1, score=10)
    score2 = UserScore(user=user2, score=100)
    new_p.scores = [score1, score2]
    new_p.save()
| 570 | 0 | 46 |
dc07ef82cc6785181cafb67430d362390589a0d2 | 352 | py | Python | src/git.py | tiltshiftnl/bewerkdemarkten-python-api | 077efbf6829eba114ab95662725d1969f2a7d56f | [
"MIT"
] | null | null | null | src/git.py | tiltshiftnl/bewerkdemarkten-python-api | 077efbf6829eba114ab95662725d1969f2a7d56f | [
"MIT"
] | null | null | null | src/git.py | tiltshiftnl/bewerkdemarkten-python-api | 077efbf6829eba114ab95662725d1969f2a7d56f | [
"MIT"
] | null | null | null | from git.repo.base import Repo
from .settings import settings
| 29.333333 | 77 | 0.647727 | from git.repo.base import Repo
from .settings import settings
class Git:
    """Thin helper around GitPython for making sure the content
    repository is available locally."""

    @staticmethod
    def clone():
        """Open the repo at settings.REPOSITORY_DIR, cloning it from
        settings.GIT_REPOSITORY first when the directory is not a valid
        repository.
        """
        try:
            print(f'Using existing repo in {settings.REPOSITORY_DIR}')
            Repo(settings.REPOSITORY_DIR)
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.  Exception still covers GitPython's
            # InvalidGitRepositoryError / NoSuchPathError raised when the
            # directory is absent or not a repo.
            print('Repo does not exist')
            Repo.clone_from(settings.GIT_REPOSITORY, settings.REPOSITORY_DIR)
| 252 | -11 | 49 |
7cac5f6261b5713d68bed27dfaa1b1655b8c05d6 | 2,928 | py | Python | GetAssemblerProduction/Entities/ExtractEntitiesToFile.py | BurnySc2/Factorio-Scripts | 747ff77bb79485a73c7df9c06dfd314ec794fc07 | [
"MIT"
] | 1 | 2020-08-31T07:32:50.000Z | 2020-08-31T07:32:50.000Z | GetAssemblerProduction/Entities/ExtractEntitiesToFile.py | BurnySc2/Factorio-Scripts | 747ff77bb79485a73c7df9c06dfd314ec794fc07 | [
"MIT"
] | null | null | null | GetAssemblerProduction/Entities/ExtractEntitiesToFile.py | BurnySc2/Factorio-Scripts | 747ff77bb79485a73c7df9c06dfd314ec794fc07 | [
"MIT"
] | null | null | null |
import os
import re
import base64
import zlib
import json
import time
from slpp import slpp as lua # https://github.com/SirAnthony/slpp
if __name__ == "__main__":
    # One-shot cache build: parse the Factorio entity prototypes into
    # entities.json next to this script, unless it already exists.
    factorioPath = r"C:\Program Files (x86)\Steam\SteamApps\common\Factorio"
    path = os.path.dirname(__file__)
    # recipeRelPath = r"data\base\prototypes\recipe"
    # entityRelPath = r"data\base\prototypes\entity"
    # graphicsRelPath = r"data\base\graphics\entity"
    # cacheRelPath = "cache"
    # # make cache directory
    # if not os.path.isdir(os.path.join(path, cacheRelPath)):
    #     os.makedirs(os.path.join(path, cacheRelPath))
    # # saving entities to entities.json, acts like "caching"
    # if os.path.isfile(os.path.join(path, cacheRelPath, "entities.json")):
    #     with open(os.path.join(path, cacheRelPath, "entities.json")) as f:
    #         entityJson = json.load(f)
    # else:
    #     entityJson = readLuaToJson(factorioPath, entityRelPath, filenamesContain = ["entities"])
    #     with open(os.path.join(path, cacheRelPath, "entities.json"), "w") as f:
    #         json.dump(entityJson, f, indent=4)
    entityRelPath = r"data\base\prototypes\entity"
    if os.path.isfile(os.path.join(path, "entities.json")):
        # Cache already present: nothing to do.
        # with open(os.path.join(path, cacheRelPath, "recipes.json")) as f:
        #     recipeJson = json.load(f)
        pass
    else:
        # filenamesContain=[] disables filename filtering: every entity
        # prototype file is parsed.
        entityJson = readLuaToJson(factorioPath, entityRelPath, filenamesContain = [])
        with open(os.path.join(path, "entities.json"), "w") as f:
            json.dump(entityJson, f, indent=4)
import os
import re
import base64
import zlib
import json
import time
from slpp import slpp as lua # https://github.com/SirAnthony/slpp
def convertLuaTableToJson(luaTable):
    """Decode a Lua table literal (optionally wrapped in a
    data:extend(...) call) into the equivalent Python structure."""
    prefix = "data:extend("
    if luaTable.startswith(prefix):
        # Strip the wrapper call, keeping only the table literal inside.
        luaTable = luaTable[len(prefix):-1]
    return lua.decode(luaTable)
def readLuaFile(filePath):
    """Extract the first 'data:extend( ... )' block from a Factorio Lua
    prototype file.

    Returns the matched text (including the data:extend wrapper), or []
    when the file contains no such block.
    """
    # Raw strings: the previous plain strings relied on "\(" / "\s"
    # passing through literally, which raises invalid-escape warnings on
    # modern Python.
    start = r"data:extend\("
    end = r"\)"
    # inBetween = r"\{(\s*\{(.)+\},?\s*)+\}"
    # NOTE(review): this nested-quantifier pattern can backtrack badly on
    # pathological input; kept as-is to preserve matching behaviour.
    inBetween = r"(\s*.+\s*)+"
    regexSearchString = "({}{}{})".format(start, inBetween, end)
    reSearch = re.compile(regexSearchString)
    # 'fh' instead of 'file' so the (historical) builtin is not shadowed.
    with open(filePath) as fh:
        content = fh.read()
    results = reSearch.findall(content)
    if results == []:
        return []
    # findall yields one tuple per match (one item per group); group 0
    # is the whole data:extend block.
    return [x[0] for x in results][0]
def readLuaToJson(factorioPath, relPath, filenamesContain = ["entities"]):
returnList = []
for fileName in os.listdir(os.path.join(factorioPath, relPath)):
if len(filenamesContain) > 0 and filenamesContain[0] not in fileName: #TODO:
continue
filePath = os.path.join(factorioPath, relPath, fileName)
fileContents = readLuaFile(filePath)
if fileContents == []:
continue
jsonData = convertLuaTableToJson(fileContents)
returnList.extend(jsonData)
returnList = [x for x in returnList if type(x) == type({})]
returnJson = {x.get("name", "noName"): x for x in returnList}
return returnJson
if __name__ == "__main__":
factorioPath = r"C:\Program Files (x86)\Steam\SteamApps\common\Factorio"
path = os.path.dirname(__file__)
# recipeRelPath = r"data\base\prototypes\recipe"
# entityRelPath = r"data\base\prototypes\entity"
# graphicsRelPath = r"data\base\graphics\entity"
# cacheRelPath = "cache"
# # make cache directory
# if not os.path.isdir(os.path.join(path, cacheRelPath)):
# os.makedirs(os.path.join(path, cacheRelPath))
# # saving entities to entities.json, acts like "caching"
# if os.path.isfile(os.path.join(path, cacheRelPath, "entities.json")):
# with open(os.path.join(path, cacheRelPath, "entities.json")) as f:
# entityJson = json.load(f)
# else:
# entityJson = readLuaToJson(factorioPath, entityRelPath, filenamesContain = ["entities"])
# with open(os.path.join(path, cacheRelPath, "entities.json"), "w") as f:
# json.dump(entityJson, f, indent=4)
entityRelPath = r"data\base\prototypes\entity"
if os.path.isfile(os.path.join(path, "entities.json")):
# with open(os.path.join(path, cacheRelPath, "recipes.json")) as f:
# recipeJson = json.load(f)
pass
else:
entityJson = readLuaToJson(factorioPath, entityRelPath, filenamesContain = [])
with open(os.path.join(path, "entities.json"), "w") as f:
json.dump(entityJson, f, indent=4) | 1,289 | 0 | 69 |
ff03325d398c10333bd04e76b6626eb724d0d6ef | 18 | py | Python | test_data/parse_retree/expected/quantifier/non_greedy/at_most_3/source.py | aas-core-works/aas-core-codegen | afec2cf363b6cb69816e7724a2b58626e2165869 | [
"MIT"
] | 5 | 2021-12-29T12:55:34.000Z | 2022-03-01T17:57:21.000Z | test_data/parse_retree/expected/quantifier/non_greedy/at_most_3/source.py | aas-core-works/aas-core-codegen | afec2cf363b6cb69816e7724a2b58626e2165869 | [
"MIT"
] | 10 | 2021-12-29T02:15:55.000Z | 2022-03-09T11:04:22.000Z | test_data/parse_retree/expected/quantifier/non_greedy/at_most_3/source.py | aas-core-works/aas-core-codegen | afec2cf363b6cb69816e7724a2b58626e2165869 | [
"MIT"
] | 2 | 2021-12-29T01:42:12.000Z | 2022-02-15T13:46:33.000Z | "a{ \t, \t3 \t}?"
| 9 | 17 | 0.277778 | "a{ \t, \t3 \t}?"
| 0 | 0 | 0 |
7b0c4e6785f56dda5be3c5671ee8dc8e53628cfc | 52 | py | Python | Python32/lista 1/l1e11.py | andersonsilvade/python_C | ffc00184883089f1c2d9b8a6c32503b2c8b8d035 | [
"MIT"
] | null | null | null | Python32/lista 1/l1e11.py | andersonsilvade/python_C | ffc00184883089f1c2d9b8a6c32503b2c8b8d035 | [
"MIT"
] | null | null | null | Python32/lista 1/l1e11.py | andersonsilvade/python_C | ffc00184883089f1c2d9b8a6c32503b2c8b8d035 | [
"MIT"
] | 1 | 2020-11-04T08:36:28.000Z | 2020-11-04T08:36:28.000Z |
texto=str(2**1000000)
>>> print(len(texto))
301030
| 10.4 | 21 | 0.673077 |
texto=str(2**1000000)
>>> print(len(texto))
301030
| 0 | 0 | 0 |
9d8f7d1dc8487fe807c941751bb3892b3669e395 | 676 | py | Python | Alphabets/Small Alphabets/u.py | vijayakumarr345/pattern | d857812cea625098a18c9d45ca01b22a379d5fb0 | [
"MIT"
] | null | null | null | Alphabets/Small Alphabets/u.py | vijayakumarr345/pattern | d857812cea625098a18c9d45ca01b22a379d5fb0 | [
"MIT"
] | 1 | 2021-03-18T12:33:06.000Z | 2021-03-18T12:33:48.000Z | Alphabets/Small Alphabets/u.py | vijayakumarr345/pattern | d857812cea625098a18c9d45ca01b22a379d5fb0 | [
"MIT"
] | null | null | null | # Small alphabet u using function
def for_u():
""" *'s printed in the shape of small u """
for row in range(5):
for col in range(5):
if col %4 ==0 and row !=4 or row ==4 and col%4 !=0:
print('*',end=' ')
else:
print(' ',end=' ')
print()
def while_u():
""" *'s printed in the Shape of Small u """
row =0
while row <5:
col =0
while col <5:
if col %4 ==0 and row !=4 or row ==4 and col%4 !=0:
print('*',end=' ')
else:
print(' ',end=' ')
col+=1
print()
row +=1
| 26 | 64 | 0.392012 | # Small alphabet u using function
def for_u():
""" *'s printed in the shape of small u """
for row in range(5):
for col in range(5):
if col %4 ==0 and row !=4 or row ==4 and col%4 !=0:
print('*',end=' ')
else:
print(' ',end=' ')
print()
def while_u():
""" *'s printed in the Shape of Small u """
row =0
while row <5:
col =0
while col <5:
if col %4 ==0 and row !=4 or row ==4 and col%4 !=0:
print('*',end=' ')
else:
print(' ',end=' ')
col+=1
print()
row +=1
| 0 | 0 | 0 |
13f2fc4bdf3c23e6c4ac4da77c56fc9d6c38f110 | 3,256 | py | Python | cloudbaseinit/tests/plugins/windows/test_createuser.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 160 | 2015-01-09T14:45:59.000Z | 2022-03-15T09:15:12.000Z | cloudbaseinit/tests/plugins/windows/test_createuser.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 95 | 2015-01-25T15:22:05.000Z | 2022-03-16T10:40:27.000Z | cloudbaseinit/tests/plugins/windows/test_createuser.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 86 | 2015-01-19T17:19:35.000Z | 2022-03-24T09:21:55.000Z | # Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.plugins.windows import createuser
from cloudbaseinit.tests import testutils
| 35.010753 | 78 | 0.673833 | # Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.plugins.windows import createuser
from cloudbaseinit.tests import testutils
class CreateUserPluginTests(unittest.TestCase):
def setUp(self):
self._create_user = createuser.CreateUserPlugin()
def test_create_user(self):
mock_osutils = mock.Mock()
self._create_user.create_user(
mock.sentinel.username,
mock.sentinel.password,
mock_osutils)
mock_osutils.create_user.assert_called_once_with(
mock.sentinel.username,
mock.sentinel.password)
@mock.patch('cloudbaseinit.plugins.windows.createuser.CreateUserPlugin.'
'_create_user_logon')
def test_post_create_user(self, mock_create_user_logon):
mock_osutils = mock.Mock()
self._create_user.post_create_user(
mock.sentinel.username,
mock.sentinel.password,
mock_osutils)
mock_create_user_logon.assert_called_once_with(
mock.sentinel.username,
mock.sentinel.password,
mock_osutils)
def test__create_user_logon(self):
mock_osutils = mock.Mock()
mock_token = mock.sentinel.token
mock_osutils.create_user_logon_session.return_value = mock_token
self._create_user._create_user_logon(
mock.sentinel.user_name,
mock.sentinel.password,
mock_osutils)
mock_osutils.create_user_logon_session.assert_called_once_with(
mock.sentinel.user_name,
mock.sentinel.password,
True)
mock_osutils.close_user_logon_session.assert_called_once_with(
mock_token)
def test__create_user_logon_fails(self):
mock_osutils = mock.Mock()
mock_osutils.create_user_logon_session.side_effect = Exception
with testutils.LogSnatcher('cloudbaseinit.plugins.windows.'
'createuser') as snatcher:
self._create_user._create_user_logon(
mock.sentinel.user_name,
mock.sentinel.password,
mock_osutils)
mock_osutils.create_user_logon_session.assert_called_once_with(
mock.sentinel.user_name,
mock.sentinel.password,
True)
self.assertFalse(mock_osutils.close_user_logon_session.called)
logging_message = (
"Cannot create a user logon session for user: \"%s\""
% mock.sentinel.user_name)
self.assertTrue(snatcher.output[0].startswith(logging_message))
| 2,153 | 276 | 23 |
d528cf6556193e0f2cf0b028b866ddfb3748effc | 3,680 | py | Python | gdc_ng_models/models/released_data.py | NCI-GDC/gdc-ng-models | 4c3a964b0626ced2cbbccebcc7483490f9251aea | [
"Apache-2.0"
] | 2 | 2020-04-01T19:31:56.000Z | 2020-10-21T23:16:07.000Z | gdc_ng_models/models/released_data.py | NCI-GDC/gdc-ng-models | 4c3a964b0626ced2cbbccebcc7483490f9251aea | [
"Apache-2.0"
] | 9 | 2019-09-20T19:40:25.000Z | 2021-09-23T19:06:01.000Z | gdc_ng_models/models/released_data.py | NCI-GDC/gdc-ng-models | 4c3a964b0626ced2cbbccebcc7483490f9251aea | [
"Apache-2.0"
] | 1 | 2020-08-14T08:55:27.000Z | 2020-08-14T08:55:27.000Z | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.sql import schema, sqltypes
from gdc_ng_models.models import audit
Base = declarative_base()
RELEASED_DATA_DATA_TYPE_VALUES = frozenset({"ssm", "cnv", "case"})
RELEASED_DATA_LOG_ACTION_VALUES = frozenset({"release", "unrelease"})
| 34.074074 | 119 | 0.648913 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.sql import schema, sqltypes
from gdc_ng_models.models import audit
Base = declarative_base()
RELEASED_DATA_DATA_TYPE_VALUES = frozenset({"ssm", "cnv", "case"})
RELEASED_DATA_LOG_ACTION_VALUES = frozenset({"release", "unrelease"})
class ReleasedDataMixin:
program_name = schema.Column(sqltypes.Text, nullable=False)
project_code = schema.Column(sqltypes.Text, nullable=False)
is_open = schema.Column(sqltypes.Boolean, nullable=False)
data_type = schema.Column(sqltypes.Text, nullable=False)
@property
def project_id(self):
return "{}-{}".format(self.program_name, self.project_code)
@validates("data_type")
def validate_data_type(self, key, data_type):
if data_type not in RELEASED_DATA_DATA_TYPE_VALUES:
raise ValueError(
""""{data_type}" is not a valid value for {key}""".format(
data_type=data_type, key=key
)
)
return data_type
class ReleasedData(Base, audit.AuditColumnsMixin, ReleasedDataMixin):
__tablename__ = "released_data"
__table_args__ = (
schema.PrimaryKeyConstraint(
"program_name", "project_code", "data_type", name="released_data_pk",
),
)
def __repr__(self):
return "<ReleasedData(project_id='{}', data_type='{}', is_controlled={}, is_open={})>".format(
self.project_id, self.data_type, self.is_controlled, self.is_open,
)
is_controlled = schema.Column(sqltypes.Boolean, nullable=False)
def to_json(self):
return {
"program_name": self.program_name,
"project_code": self.project_code,
"data_type": self.data_type,
"is_controlled": self.is_controlled,
"is_open": self.is_open,
}
@hybrid_property
def id(self):
return "{}_{}_{}".format(self.program_name, self.project_code, self.data_type)
class ReleasedDataLog(Base, audit.AuditColumnsMixin, ReleasedDataMixin):
__tablename__ = "released_data_log"
__table_args__ = (
schema.Index(
"released_data_log_program_name_project_code_idx",
"program_name",
"project_code",
),
schema.PrimaryKeyConstraint("id", name="released_data_log_pk"),
)
def __repr__(self):
return "<ReleasedDataLog(project_id='{}', release_number={}, data_type='{}', is_open={}, action='{}')>".format(
self.project_id, self.release_number, self.data_type, self.is_open, self.action,
)
release_data_log_id_seq = schema.Sequence(
name="release_data_log_id_seq", metadata=Base.metadata
)
id = schema.Column(
sqltypes.BigInteger,
nullable=False,
server_default=release_data_log_id_seq.next_value(),
)
release_number = schema.Column(sqltypes.Text, nullable=False)
action = schema.Column(sqltypes.Text, nullable=False)
@validates("action")
def validate_action(self, key, action):
if action not in RELEASED_DATA_LOG_ACTION_VALUES:
raise ValueError(
""""{action}" is not a valid value for {key}""".format(action=action, key=key)
)
return action
def to_json(self):
return {
"program_name": self.program_name,
"project_code": self.project_code,
"release_number": self.release_number,
"data_type": self.data_type,
"is_open": self.is_open,
"action": self.action,
}
| 1,617 | 1,599 | 69 |
dba570c2688294d73d82bb732bc97ec59cff421a | 51,294 | py | Python | batoid/rayVector.py | jmeyers314/batoid | 85cbd13a9573ddca158c9c21ced2ef0c5ad5cd25 | [
"BSD-2-Clause"
] | 13 | 2018-12-24T03:55:04.000Z | 2021-11-09T11:40:40.000Z | batoid/rayVector.py | bregeon/batoid | 7b03d9b59ff43db6746eadab7dd58a463a0415c3 | [
"BSD-2-Clause"
] | 65 | 2017-08-15T07:19:05.000Z | 2021-09-08T17:44:57.000Z | batoid/rayVector.py | bregeon/batoid | 7b03d9b59ff43db6746eadab7dd58a463a0415c3 | [
"BSD-2-Clause"
] | 10 | 2019-02-19T07:02:31.000Z | 2021-12-10T22:19:40.000Z | from numbers import Real, Integral
import numpy as np
from . import _batoid
from .constants import globalCoordSys, vacuum
from .coordSys import CoordSys
from .coordTransform import CoordTransform
from .trace import applyForwardTransform, applyForwardTransformArrays
from .utils import lazy_property, fieldToDirCos
from .surface import Plane
class RayVector:
"""Create RayVector from 1d parameter arrays. Always makes a copy
of input arrays.
Parameters
----------
x, y, z : ndarray of float, shape (n,)
Positions of rays in meters.
vx, vy, vz : ndarray of float, shape (n,)
Velocities of rays in units of the speed of light in vacuum.
t : ndarray of float, shape (n,)
Reference times (divided by the speed of light in vacuum) in units
of meters.
wavelength : ndarray of float, shape (n,)
Vacuum wavelengths in meters.
flux : ndarray of float, shape (n,)
Fluxes in arbitrary units.
vignetted : ndarray of bool, shape (n,)
True where rays have been vignetted.
coordSys : CoordSys
Coordinate system in which this ray is expressed. Default: the
global coordinate system.
"""
@staticmethod
def positionAtTime(self, t):
"""Calculate the positions of the rays at a given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape (n, 3)
Positions in meters.
"""
out = np.empty_like(self._r)
self._rv.positionAtTime(t, out.ctypes.data)
return out
def propagate(self, t):
"""Propagate this RayVector to given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
RayVector
Reference to self, no copy is made.
"""
self._rv.propagateInPlace(t)
return self
def phase(self, r, t):
"""Calculate plane wave phases at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters at which to compute phase
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape(n,)
"""
out = np.empty_like(self._t)
self._rv.phase(r[0], r[1], r[2], t, out.ctypes.data)
return out
def amplitude(self, r, t):
"""Calculate (scalar) complex electric-field amplitudes at given
position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of complex, shape (n,)
"""
out = np.empty_like(self._t, dtype=np.complex128)
self._rv.amplitude(r[0], r[1], r[2], t, out.ctypes.data)
return out
def sumAmplitude(self, r, t, ignoreVignetted=True):
"""Calculate the sum of (scalar) complex electric-field amplitudes of
all rays at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
complex
"""
return self._rv.sumAmplitude(r[0], r[1], r[2], t, ignoreVignetted)
@classmethod
def asGrid(
cls,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
nx=None, ny=None,
dx=None, dy=None,
lx=None, ly=None,
flux=1,
nrandom=None
):
"""Create RayVector on a parallelogram shaped region.
This function will often be used to create a grid of rays on a square
grid, but is flexible enough to also create grids on an arbitrary
parallelogram, or even randomly distributed across an arbitrary
parallelogram-shaped region.
The algorithm starts by placing rays on the "stop" surface, and then
backing them up such that they are in front of any surfaces of the
optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, ``stopSurface``, and ``lx`` from the Optic. Note that
values explicitly passed to `asGrid` as keyword arguments override
those extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the rays and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of each ray. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
source : None or ndarray of float, shape (3,), optional
Where rays originate. If None, then rays originate an infinite
distance away, in which case the ``dirCos`` kwarg must also be
specified to set the direction of ray propagation. If an ndarray,
then the rays originate from this point in global coordinates and
the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then this indicates the initial direction of
propagation of the rays. If source is not None, then this is
ignored. Also see ``theta_x``, ``theta_y`` as an alternative to
this keyword.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
nx, ny : int, optional
Number of rays on each side of grid.
dx, dy : float or (2,) array of float, optional
Separation in meters between adjacent rays in grid. If scalars,
then the separations are exactly along the x and y directions. If
arrays, then these are interpretted as the primitive vectors for
the first and second dimensions of the grid. If only dx is
explicitly specified, then dy will be inferred as a 90-degree
rotation from dx with the same length as dx.
lx, ly : float or (2,) array of float, optional
Length of each side of ray grid. If scalars, then these are
measured along the x and y directions. If arrays, then these also
indicate the primitive vectors orientation of the grid. If only
lx is specified, then ly will be inferred as a 90-degree rotation
from lx with the same length as lx. If lx is ``None``, then first
infer a value from ``nx`` and ``dx``, and if that doesn't work,
infer a value from ``optic.pupilSize``.
flux : float, optional
Flux to assign each ray. Default is 1.0.
nrandom : None or int, optional
If not None, then uniformly sample this many rays from
parallelogram region instead of sampling on a regular grid.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
try:
stopSurface = optic.stopSurface
except AttributeError:
stopSurface = None
if lx is None:
# If nx and dx are both present, then let lx get inferred from
# them. Otherwise, infer from optic.
if nx is None or dx is None:
lx = optic.pupilSize
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
# To determine the parallelogram, exactly 2 of nx, dx, lx must be set.
if sum(a is not None for a in [nx, dx, lx]) != 2:
raise ValueError("Exactly 2 of nx, dx, lx must be specified")
if nx is not None and ny is None:
ny = nx
if dx is not None and dy is None:
dy = dx
if lx is not None and ly is None:
if isinstance(lx, Real):
ly = lx
else:
ly = np.dot(np.array([[0, -1], [1, 0]]), lx)
# We need lx, ly, nx, ny for below, so construct these from other
# arguments if they're not already available.
if nx is not None and dx is not None:
if (nx%2) == 0:
lx = dx*(nx-2)
else:
lx = dx*(nx-1)
if (ny%2) == 0:
ly = dy*(ny-2)
else:
ly = dy*(ny-1)
elif lx is not None and dx is not None:
# adjust dx in this case
# always infer an even n (since even and odd are degenerate given
# only lx, dx).
slop = 0.1 # prevent 3.9999 -> 3, e.g.
nx = int((lx/dx+slop)//2)*2+2
ny = int((ly/dy+slop)//2)*2+2
# These are the real dx, dy; which may be different from what was
# passed in order to force an integer for nx/ny. We don't actually
# need them after this point though.
# dx = lx/(nx-2)
# dy = ly/(ny-2)
if isinstance(lx, Real):
lx = (lx, 0.0)
if isinstance(ly, Real):
ly = (0.0, ly)
if nrandom is not None:
xx = np.random.uniform(-0.5, 0.5, size=nrandom)
yy = np.random.uniform(-0.5, 0.5, size=nrandom)
else:
if nx <= 2:
x_d = 1.
else:
x_d = (nx-(2 if (nx%2) == 0 else 1))/nx
if ny <= 2:
y_d = 1.
else:
y_d = (ny-(2 if (ny%2) == 0 else 1))/ny
xx = np.fft.fftshift(np.fft.fftfreq(nx, x_d))
yy = np.fft.fftshift(np.fft.fftfreq(ny, y_d))
xx, yy = np.meshgrid(xx, yy)
xx = xx.ravel()
yy = yy.ravel()
r = np.empty((len(xx), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
stack = np.stack([xx, yy])
x[:] = np.dot(lx, stack)
y[:] = np.dot(ly, stack)
del xx, yy, stack
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def asPolar(
cls,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
outer=None, inner=0.0,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
nrad=None, naz=None,
flux=1,
nrandom=None
):
"""Create RayVector on an annular region using a hexapolar grid.
This function can be used to regularly sample the entrance pupil of a
telescope using polar symmetry (really, hexagonal symmetry). Rings of
different radii are used, with the number of samples on each ring
restricted to a multiple of 6 (with the exception of a potential
central "ring" of radius 0, which is only ever sampled once). This may
be more efficient than using a square grid since more of the rays
generated may avoid vignetting.
This function is also used to generate rays uniformly randomly sampled
from a given annular region.
The algorithm used here starts by placing rays on the "stop" surface,
and then backing them up such that they are in front of any surfaces of
the optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, ``stopSurface``, and ``outer`` from the Optic. Note
that values explicitly passed to `asPolar` as keyword arguments
override those extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the ray and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of each ray. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
outer : float
Outer radius of annulus in meters.
inner : float, optional
Inner radius of annulus in meters. Default is 0.0.
source : None or ndarray of float, shape (3,), optional
Where rays originate. If None, then rays originate an infinite
distance away, in which case the ``dirCos`` kwarg must also be
specified to set the direction of ray propagation. If an ndarray,
then the rays originate from this point in global coordinates and
the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then this indicates the initial direction of
propagation of the rays. If source is not None, then this is
ignored. Also see ``theta_x``, ``theta_y`` as an alternative to
this keyword.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
nrad : int
Number of radii on which create rays.
naz : int
Approximate number of azimuthal angles uniformly spaced along the
outermost ring. Each ring is constrained to have a multiple of 6
azimuths, so the realized value may be slightly different than
the input value here. Inner rings will have fewer azimuths in
proportion to their radius, but will still be constrained to a
multiple of 6. (If the innermost ring has radius 0, then exactly
1 ray, with azimuth undefined, will be used on that "ring".)
flux : float, optional
Flux to assign each ray. Default is 1.0.
nrandom : int, optional
If not None, then uniformly sample this many rays from annular
region instead of sampling on a hexapolar grid.
"""
from .optic import Interface
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if outer is None:
outer = optic.pupilSize/2
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
if nrandom is None:
nphis = []
rhos = np.linspace(outer, inner, nrad)
for rho in rhos:
nphi = int((naz*rho/outer)//6)*6
if nphi == 0:
nphi = 6
nphis.append(nphi)
if inner == 0.0:
nphis[-1] = 1
th = np.empty(np.sum(nphis))
rr = np.empty(np.sum(nphis))
idx = 0
for rho, nphi in zip(rhos, nphis):
rr[idx:idx+nphi] = rho
th[idx:idx+nphi] = np.linspace(0, 2*np.pi, nphi, endpoint=False)
idx += nphi
if inner == 0.0:
rr[-1] = 0.0
th[-1] = 0.0
else:
rr = np.sqrt(np.random.uniform(inner**2, outer**2, size=nrandom))
th = np.random.uniform(0, 2*np.pi, size=nrandom)
r = np.empty((len(rr), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
x[:] = rr*np.cos(th)
y[:] = rr*np.sin(th)
del rr, th
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
    @classmethod
    def asSpokes(
        cls,
        optic=None, backDist=None, medium=None, stopSurface=None,
        wavelength=None,
        outer=None, inner=0.0,
        source=None, dirCos=None,
        theta_x=None, theta_y=None, projection='postel',
        spokes=None, rings=None,
        spacing='uniform',
        flux=1
    ):
        """Create RayVector on an annular region using a spokes pattern.

        The function generates rays on a rings-and-spokes pattern, with a fixed
        number of radii for each azimuth and a fixed number of azimuths for
        each radius.  Its main use is for decomposing functions in pupil space
        into Zernike components using Gaussian Quadrature integration on
        annuli.  For more general purpose annular sampling, RayVector.asPolar()
        is often a better choice since it samples the pupil more uniformly.

        The algorithm used here starts by placing rays on the "stop" surface,
        and then backing them up such that they are in front of any surfaces of
        the optic they're intended to trace.

        The stop surface of most large telescopes is the plane perpendicular to
        the optic axis and flush with the rim of the primary mirror.  This
        plane is usually also the entrance pupil since there are no earlier
        refractive or reflective surfaces.  However, since this plane is a bit
        difficult to locate automatically, the default stop surface in batoid
        is the global x-y plane.

        If a telescope has a stopSurface attribute in its yaml file, then this
        is usually a good choice to use in this function.  Using a curved
        surface for the stop surface is allowed, but is usually a bad idea as
        this may lead to a non-uniformly illuminated pupil and is inconsistent
        with, say, an incoming uniform spherical wave or uniform plane wave.

        Parameters
        ----------
        optic : `batoid.Optic`, optional
            If present, then try to extract values for ``backDist``,
            ``medium``, ``stopSurface``, and ``outer`` from the Optic.  Note
            that values explicitly passed to `asSpokes` as keyword arguments
            override those extracted from ``optic``.
        backDist : float, optional
            Map rays backwards from the stop surface to the plane that is
            perpendicular to the ray and ``backDist`` meters from the point
            (0, 0, z(0,0)) on the stop surface.  This should generally be set
            large enough that any obscurations or phantom surfaces occurring
            before the stop surface are now "in front" of the ray.  If this
            keyword is set to ``None`` and the ``optic`` keyword is set, then
            infer a value from ``optic.backDist``.  If both this keyword and
            ``optic`` are ``None``, then use a default of 40 meters, which
            should be sufficiently large for foreseeable telescopes.
        medium : `batoid.Medium`, optional
            Initial medium of each ray.  If this keyword is set to ``None`` and
            the ``optic`` keyword is set, then infer a value from
            ``optic.inMedium``.  If both this keyword and ``optic`` are
            ``None``, then use a default of vacuum.
        stopSurface : batoid.Interface, optional
            Surface defining the system stop.  If this keyword is set to
            ``None`` and the ``optic`` keyword is set, then infer a value from
            ``optic.stopSurface``.  If both this keyword and ``optic`` are
            ``None``, then use a default ``Interface(Plane())``, which is the
            global x-y plane.
        wavelength : float
            Vacuum wavelength of rays in meters.
        outer : float
            Outer radius of annulus in meters.
        inner : float, optional
            Inner radius of annulus in meters.  Default is 0.0.
        source : None or ndarray of float, shape (3,), optional
            Where rays originate.  If None, then rays originate an infinite
            distance away, in which case the ``dirCos`` kwarg must also be
            specified to set the direction of ray propagation.  If an ndarray,
            then the rays originate from this point in global coordinates and
            the ``dirCos`` kwarg is ignored.
        dirCos : ndarray of float, shape (3,), optional
            If source is None, then this indicates the initial direction of
            propagation of the rays.  If source is not None, then this is
            ignored.  Also see ``theta_x``, ``theta_y`` as an alternative to
            this keyword.
        theta_x, theta_y : float, optional
            Field angle in radians.  If source is None, then this indicates the
            initial direction of propagation of the rays.  If source is not
            None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
            to direction cosines.  Also see ``dirCos`` as an alternative to
            this keyword.
        projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
            Projection used to convert field angle to direction cosines.
        spokes : int or ndarray of float
            If int, then number of spokes to use.
            If ndarray, then the values of the spokes azimuthal angles in
            radians.
        rings : int or ndarray of float
            If int, then number of rings to use.
            If array, then the values of the ring radii to use in meters.
        spacing : {'uniform', 'GQ'}
            If uniform, assign ring radii uniformly between ``inner`` and
            ``outer``.
            If GQ, then assign ring radii as the Gaussian Quadrature points
            for integration on an annulus.  In this case, the ray fluxes will
            be set to the Gaussian Quadrature weights (and the ``flux`` kwarg
            will be ignored).
        flux : float, optional
            Flux to assign each ray.  Default is 1.0.
        """
        from .optic import Interface
        from .surface import Plane
        # Resolve defaults: explicit kwargs win, then values pulled from
        # ``optic``, then the global fallbacks (40 m, x-y plane, vacuum).
        if optic is not None:
            if backDist is None:
                backDist = optic.backDist
            if medium is None:
                medium = optic.inMedium
            if stopSurface is None:
                stopSurface = optic.stopSurface
            if outer is None:
                outer = optic.pupilSize/2
        if backDist is None:
            backDist = 40.0
        if stopSurface is None:
            stopSurface = Interface(Plane())
        if medium is None:
            medium = vacuum
        if dirCos is None and source is None:
            dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
        if wavelength is None:
            raise ValueError("Missing wavelength keyword")
        # Expand an integer ring count into actual ring radii.
        if isinstance(rings, Integral):
            if spacing == 'uniform':
                rings = np.linspace(inner, outer, rings)
            elif spacing == 'GQ':
                # Gauss-Legendre quadrature on the annulus: map the nodes Li
                # from [-1, 1] onto radii such that equal weight corresponds
                # to equal area, and fold the quadrature weights (times the
                # annulus area, split over the spokes) into the ray fluxes.
                if spokes is None:
                    spokes = 2*rings+1
                Li, w = np.polynomial.legendre.leggauss(rings)
                eps = inner/outer
                area = np.pi*(1-eps**2)
                rings = np.sqrt(eps**2 + (1+Li)*(1-eps**2)/2)*outer
                flux = w*area/(2*spokes)
        # Expand an integer spoke count into actual azimuthal angles.
        if isinstance(spokes, Integral):
            spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
        # Outer product of ring radii and spoke angles, flattened to 1d.
        rings, spokes = np.meshgrid(rings, spokes)
        flux = np.broadcast_to(flux, rings.shape)
        rings = rings.ravel()
        spokes = spokes.ravel()
        flux = flux.ravel()
        # Fortran order makes each column of r a contiguous view, so x, y, z
        # below alias r and in-place writes (including the coordinate
        # transform) update r directly.
        r = np.empty((len(rings), 3), order='F')
        x = r[:, 0]
        y = r[:, 1]
        z = r[:, 2]
        x[:] = rings*np.cos(spokes)
        y[:] = rings*np.sin(spokes)
        del rings, spokes
        z[:] = stopSurface.surface.sag(x, y)
        transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
        applyForwardTransformArrays(transform, x, y, z)
        w = np.empty_like(x)
        w.fill(wavelength)
        n = medium.getN(wavelength)
        return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def _finish(cls, backDist, source, dirCos, n, r, w, flux):
"""Map rays backwards to their source position."""
if isinstance(flux, Real):
flux = np.full(len(r), float(flux))
if source is None:
from ._batoid import finishParallel
vv = np.array(dirCos, dtype=float)
vv /= n*np.sqrt(np.dot(vv, vv))
zhat = -n*vv
xhat = np.cross(np.array([1.0, 0.0, 0.0]), zhat)
xhat /= np.sqrt(np.dot(xhat, xhat))
yhat = np.cross(xhat, zhat)
origin = zhat*backDist
rot = np.stack([xhat, yhat, zhat]).T
finishParallel(origin, rot.ravel(), vv, r.ctypes.data, len(r))
v = np.full_like(r, vv)
t = np.zeros(len(r), dtype=float)
vignetted = np.zeros(len(r), dtype=bool)
failed = np.zeros(len(r), dtype=bool)
return RayVector._directInit(
r, v, t, w, flux, vignetted, failed, globalCoordSys
)
else:
v = np.copy(r)
v -= source
v /= n*np.einsum('ab,ab->b', v, v)
r[:] = source
t = np.zeros(len(r), dtype=float)
vignetted = np.zeros(len(r), dtype=bool)
failed = np.zeros(len(r), dtype=bool)
return RayVector._directInit(
r, v, t, w, flux, vignetted, failed, globalCoordSys
)
@classmethod
def fromStop(
cls, x, y,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
flux=1
):
"""Create rays that intersects the "stop" surface at given points.
The algorithm used here starts by placing the rays on the "stop"
surface, and then backing them up such that they are in front of any
surfaces of the optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
x, y : ndarray
X/Y coordinates on the stop surface where the rays would intersect
if not refracted or reflected first.
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, and ``stopSurface`` from the Optic. Note that values
explicitly passed here as keyword arguments override those
extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the rays and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of rays. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
source : None or ndarray of float, shape (3,), optional
Where the rays originate. If None, then the rays originate an
infinite distance away, in which case the ``dirCos`` kwarg must also
be specified to set the direction of ray propagation. If an
ndarray, then the rays originates from this point in global
coordinates and the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then indicates the direction of ray propagation.
If source is not None, then this is ignored.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
flux : float, optional
Flux of rays. Default is 1.0.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
xx = np.atleast_1d(x)
yy = np.atleast_1d(y)
r = np.empty((len(xx), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
x[:] = xx
y[:] = yy
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def fromFieldAngles(
cls, theta_x, theta_y, projection='postel',
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
x=0, y=0,
flux=1
):
"""Create RayVector with one stop surface point but many field angles.
This method is similar to `fromStop` but broadcasts over ``theta_x``
and ``theta_y`` instead of over ``x`` and ``y``. There is less
currently less effort paid to synchronizing the ``t`` values of the
created rays, as they don't correspond to points on a physical incoming
wavefront in this case. The primary intended use case is to map chief
rays (``x``=``y``=0) from incoming field angle to focal plane position.
Parameters
----------
theta_x, theta_y : ndarray
Field angles in radians.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, and ``stopSurface`` from the Optic. Note that values
explicitly passed here as keyword arguments override those
extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface this far. This should
generally be set large enough that any obscurations or phantom
surfaces occuring before the stop surface are now "in front" of the
rays. If this keyword is set to ``None`` and the ``optic`` keyword
is set, then infer a value from ``optic.backDist``. If both this
keyword and ``optic`` are ``None``, then use a default of 40 meters,
which should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of rays. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
x, y : float
X/Y coordinates on the stop surface where the rays would intersect
if not refracted or reflected first.
flux : float, optional
Flux of rays. Default is 1.0.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if wavelength is None:
raise ValueError("Missing wavelength keyword")
vx, vy, vz = fieldToDirCos(theta_x, theta_y, projection=projection)
n = medium.getN(wavelength)
vx /= n
vy /= n
vz /= n
z = stopSurface.surface.sag(x, y)
x = np.full_like(vx, x)
y = np.full_like(vx, y)
z = np.full_like(vx, z)
t = np.zeros_like(vx)
rv = RayVector(
x, y, z,
vx, vy, vz,
t, wavelength, flux,
coordSys=stopSurface.coordSys
)
rv.propagate(-backDist*n)
return rv
@property
def r(self):
"""ndarray of float, shape (n, 3): Positions of rays in meters."""
self._rv.r.syncToHost()
return self._r
@property
def x(self):
"""The x components of ray positions in meters."""
self._rv.r.syncToHost()
return self._r[:, 0]
@property
def y(self):
"""The y components of ray positions in meters."""
self._rv.r.syncToHost()
return self._r[:, 1]
@property
def z(self):
"""The z components of ray positions in meters."""
self._rv.r.syncToHost()
return self._r[:, 2]
@property
def v(self):
"""ndarray of float, shape (n, 3): Velocities of rays in units of the
speed of light in vacuum. Note that these may have magnitudes < 1 if
the rays are inside a refractive medium.
"""
self._rv.v.syncToHost()
return self._v
@property
def vx(self):
"""The x components of ray velocities units of the vacuum speed of
light.
"""
self._rv.v.syncToHost()
return self._v[:, 0]
@property
def vy(self):
"""The y components of ray velocities units of the vacuum speed of
light.
"""
self._rv.v.syncToHost()
return self._v[:, 1]
@property
def vz(self):
"""The z components of ray velocities units of the vacuum speed of
light.
"""
self._rv.v.syncToHost()
return self._v[:, 2]
@property
def t(self):
"""Reference times (divided by the speed of light in vacuum) in units
of meters, also known as the optical path lengths.
"""
self._rv.t.syncToHost()
return self._t
@property
def wavelength(self):
"""Vacuum wavelengths in meters."""
# wavelength is constant, so no need to synchronize
return self._wavelength
@property
def flux(self):
"""Fluxes in arbitrary units."""
self._rv.flux.syncToHost()
return self._flux
@property
def vignetted(self):
"""True for rays that have been vignetted."""
self._rv.vignetted.syncToHost()
return self._vignetted
@property
def failed(self):
"""True for rays that have failed. This may occur, for example, if
batoid failed to find the intersection of a ray wiht a surface.
"""
self._rv.failed.syncToHost()
return self._failed
@property
def k(self):
r"""ndarray of float, shape (n, 3): Wavevectors of plane waves in units
of radians per meter. The magnitude of each wavevector is equal to
:math:`2 \pi n / \lambda`, where :math:`n` is the refractive index and
:math:`\lambda` is the wavelength.
"""
out = 2*np.pi*np.array(self.v)
out /= self.wavelength[:, None]
out /= np.sum(self.v*self.v, axis=-1)[:, None]
return out
@property
def kx(self):
"""The x component of each ray wavevector in radians per meter."""
return self.k[:,0]
@property
def ky(self):
"""The y component of each ray wavevector in radians per meter."""
return self.k[:,1]
@property
def kz(self):
"""The z component of each ray wavevector in radians per meter."""
return self.k[:,2]
@property
def omega(self):
r"""The temporal angular frequency of each plane wave divided by the
vacuum speed of light in units of radians per meter. Equals
:math:`2 \pi / \lambda`.
"""
return 2*np.pi/self.wavelength
@lazy_property
def toCoordSys(self, coordSys):
"""Transform this RayVector into a new coordinate system.
Parameters
----------
coordSys: batoid.CoordSys
Destination coordinate system.
Returns
-------
RayVector
Reference to self, no copy is made.
"""
transform = CoordTransform(self.coordSys, coordSys)
applyForwardTransform(transform, self)
return self
| 41.002398 | 106 | 0.58424 | from numbers import Real, Integral
import numpy as np
from . import _batoid
from .constants import globalCoordSys, vacuum
from .coordSys import CoordSys
from .coordTransform import CoordTransform
from .trace import applyForwardTransform, applyForwardTransformArrays
from .utils import lazy_property, fieldToDirCos
from .surface import Plane
def _reshape_arrays(arrays, shape, dtype=float):
for i in range(len(arrays)):
array = arrays[i]
if not hasattr(array, 'shape') or array.shape != shape:
arrays[i] = np.array(np.broadcast_to(array, shape))
arrays[i] = np.ascontiguousarray(arrays[i], dtype=dtype)
return arrays
class RayVector:
"""Create RayVector from 1d parameter arrays. Always makes a copy
of input arrays.
Parameters
----------
x, y, z : ndarray of float, shape (n,)
Positions of rays in meters.
vx, vy, vz : ndarray of float, shape (n,)
Velocities of rays in units of the speed of light in vacuum.
t : ndarray of float, shape (n,)
Reference times (divided by the speed of light in vacuum) in units
of meters.
wavelength : ndarray of float, shape (n,)
Vacuum wavelengths in meters.
flux : ndarray of float, shape (n,)
Fluxes in arbitrary units.
vignetted : ndarray of bool, shape (n,)
True where rays have been vignetted.
coordSys : CoordSys
Coordinate system in which this ray is expressed. Default: the
global coordinate system.
"""
def __init__(
self, x, y, z, vx, vy, vz, t=0.0, wavelength=0.0, flux=1.0,
vignetted=False, failed=False, coordSys=globalCoordSys
):
shape = np.broadcast(
x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
).shape
x, y, z, vx, vy, vz, t, wavelength, flux = _reshape_arrays(
[x, y, z, vx, vy, vz, t, wavelength, flux],
shape
)
vignetted, failed = _reshape_arrays(
[vignetted, failed],
shape,
bool
)
self._r = np.ascontiguousarray([x, y, z], dtype=float).T
self._v = np.ascontiguousarray([vx, vy, vz], dtype=float).T
self._t = t
self._wavelength = wavelength
self._flux = flux
self._vignetted = vignetted
self._failed = failed
self.coordSys = coordSys
@staticmethod
def _directInit(
r, v, t, wavelength, flux, vignetted, failed, coordSys
):
ret = RayVector.__new__(RayVector)
ret._r = r
ret._v = v
ret._t = t
ret._wavelength = wavelength
ret._flux = flux
ret._vignetted = vignetted
ret._failed = failed
ret.coordSys = coordSys
return ret
def positionAtTime(self, t):
"""Calculate the positions of the rays at a given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape (n, 3)
Positions in meters.
"""
out = np.empty_like(self._r)
self._rv.positionAtTime(t, out.ctypes.data)
return out
def propagate(self, t):
"""Propagate this RayVector to given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
RayVector
Reference to self, no copy is made.
"""
self._rv.propagateInPlace(t)
return self
def phase(self, r, t):
"""Calculate plane wave phases at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters at which to compute phase
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape(n,)
"""
out = np.empty_like(self._t)
self._rv.phase(r[0], r[1], r[2], t, out.ctypes.data)
return out
def amplitude(self, r, t):
"""Calculate (scalar) complex electric-field amplitudes at given
position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of complex, shape (n,)
"""
out = np.empty_like(self._t, dtype=np.complex128)
self._rv.amplitude(r[0], r[1], r[2], t, out.ctypes.data)
return out
def sumAmplitude(self, r, t, ignoreVignetted=True):
"""Calculate the sum of (scalar) complex electric-field amplitudes of
all rays at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
complex
"""
return self._rv.sumAmplitude(r[0], r[1], r[2], t, ignoreVignetted)
@classmethod
def asGrid(
cls,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
nx=None, ny=None,
dx=None, dy=None,
lx=None, ly=None,
flux=1,
nrandom=None
):
"""Create RayVector on a parallelogram shaped region.
This function will often be used to create a grid of rays on a square
grid, but is flexible enough to also create grids on an arbitrary
parallelogram, or even randomly distributed across an arbitrary
parallelogram-shaped region.
The algorithm starts by placing rays on the "stop" surface, and then
backing them up such that they are in front of any surfaces of the
optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, ``stopSurface``, and ``lx`` from the Optic. Note that
values explicitly passed to `asGrid` as keyword arguments override
those extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the rays and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of each ray. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
source : None or ndarray of float, shape (3,), optional
Where rays originate. If None, then rays originate an infinite
distance away, in which case the ``dirCos`` kwarg must also be
specified to set the direction of ray propagation. If an ndarray,
then the rays originate from this point in global coordinates and
the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then this indicates the initial direction of
propagation of the rays. If source is not None, then this is
ignored. Also see ``theta_x``, ``theta_y`` as an alternative to
this keyword.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
nx, ny : int, optional
Number of rays on each side of grid.
dx, dy : float or (2,) array of float, optional
Separation in meters between adjacent rays in grid. If scalars,
then the separations are exactly along the x and y directions. If
arrays, then these are interpretted as the primitive vectors for
the first and second dimensions of the grid. If only dx is
explicitly specified, then dy will be inferred as a 90-degree
rotation from dx with the same length as dx.
lx, ly : float or (2,) array of float, optional
Length of each side of ray grid. If scalars, then these are
measured along the x and y directions. If arrays, then these also
indicate the primitive vectors orientation of the grid. If only
lx is specified, then ly will be inferred as a 90-degree rotation
from lx with the same length as lx. If lx is ``None``, then first
infer a value from ``nx`` and ``dx``, and if that doesn't work,
infer a value from ``optic.pupilSize``.
flux : float, optional
Flux to assign each ray. Default is 1.0.
nrandom : None or int, optional
If not None, then uniformly sample this many rays from
parallelogram region instead of sampling on a regular grid.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
try:
stopSurface = optic.stopSurface
except AttributeError:
stopSurface = None
if lx is None:
# If nx and dx are both present, then let lx get inferred from
# them. Otherwise, infer from optic.
if nx is None or dx is None:
lx = optic.pupilSize
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
# To determine the parallelogram, exactly 2 of nx, dx, lx must be set.
if sum(a is not None for a in [nx, dx, lx]) != 2:
raise ValueError("Exactly 2 of nx, dx, lx must be specified")
if nx is not None and ny is None:
ny = nx
if dx is not None and dy is None:
dy = dx
if lx is not None and ly is None:
if isinstance(lx, Real):
ly = lx
else:
ly = np.dot(np.array([[0, -1], [1, 0]]), lx)
# We need lx, ly, nx, ny for below, so construct these from other
# arguments if they're not already available.
if nx is not None and dx is not None:
if (nx%2) == 0:
lx = dx*(nx-2)
else:
lx = dx*(nx-1)
if (ny%2) == 0:
ly = dy*(ny-2)
else:
ly = dy*(ny-1)
elif lx is not None and dx is not None:
# adjust dx in this case
# always infer an even n (since even and odd are degenerate given
# only lx, dx).
slop = 0.1 # prevent 3.9999 -> 3, e.g.
nx = int((lx/dx+slop)//2)*2+2
ny = int((ly/dy+slop)//2)*2+2
# These are the real dx, dy; which may be different from what was
# passed in order to force an integer for nx/ny. We don't actually
# need them after this point though.
# dx = lx/(nx-2)
# dy = ly/(ny-2)
if isinstance(lx, Real):
lx = (lx, 0.0)
if isinstance(ly, Real):
ly = (0.0, ly)
if nrandom is not None:
xx = np.random.uniform(-0.5, 0.5, size=nrandom)
yy = np.random.uniform(-0.5, 0.5, size=nrandom)
else:
if nx <= 2:
x_d = 1.
else:
x_d = (nx-(2 if (nx%2) == 0 else 1))/nx
if ny <= 2:
y_d = 1.
else:
y_d = (ny-(2 if (ny%2) == 0 else 1))/ny
xx = np.fft.fftshift(np.fft.fftfreq(nx, x_d))
yy = np.fft.fftshift(np.fft.fftfreq(ny, y_d))
xx, yy = np.meshgrid(xx, yy)
xx = xx.ravel()
yy = yy.ravel()
r = np.empty((len(xx), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
stack = np.stack([xx, yy])
x[:] = np.dot(lx, stack)
y[:] = np.dot(ly, stack)
del xx, yy, stack
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def asPolar(
    cls,
    optic=None, backDist=None, medium=None, stopSurface=None,
    wavelength=None,
    outer=None, inner=0.0,
    source=None, dirCos=None,
    theta_x=None, theta_y=None, projection='postel',
    nrad=None, naz=None,
    flux=1,
    nrandom=None
):
    """Create RayVector on an annular region using a hexapolar grid.

    This function can be used to regularly sample the entrance pupil of a
    telescope using polar symmetry (really, hexagonal symmetry).  Rings of
    different radii are used, with the number of samples on each ring
    restricted to a multiple of 6 (with the exception of a potential
    central "ring" of radius 0, which is only ever sampled once).  This may
    be more efficient than using a square grid since more of the rays
    generated may avoid vignetting.

    This function is also used to generate rays uniformly randomly sampled
    from a given annular region.

    The algorithm used here starts by placing rays on the "stop" surface,
    and then backing them up such that they are in front of any surfaces of
    the optic they're intended to trace.

    The stop surface of most large telescopes is the plane perpendicular to
    the optic axis and flush with the rim of the primary mirror.  This
    plane is usually also the entrance pupil since there are no earlier
    refractive or reflective surfaces.  However, since this plane is a bit
    difficult to locate automatically, the default stop surface in batoid
    is the global x-y plane.

    If a telescope has a stopSurface attribute in its yaml file, then this
    is usually a good choice to use in this function.  Using a curved
    surface for the stop surface is allowed, but is usually a bad idea as
    this may lead to a non-uniformly illuminated pupil and is inconsistent
    with, say, an incoming uniform spherical wave or uniform plane wave.

    Parameters
    ----------
    optic : `batoid.Optic`, optional
        If present, then try to extract values for ``backDist``,
        ``medium``, ``stopSurface``, and ``outer`` from the Optic.  Note
        that values explicitly passed to `asPolar` as keyword arguments
        override those extracted from ``optic``.
    backDist : float, optional
        Map rays backwards from the stop surface to the plane that is
        perpendicular to the ray and ``backDist`` meters from the point
        (0, 0, z(0,0)) on the stop surface.  This should generally be set
        large enough that any obscurations or phantom surfaces occurring
        before the stop surface are now "in front" of the ray.  If this
        keyword is set to ``None`` and the ``optic`` keyword is set, then
        infer a value from ``optic.backDist``.  If both this keyword and
        ``optic`` are ``None``, then use a default of 40 meters, which
        should be sufficiently large for foreseeable telescopes.
    medium : `batoid.Medium`, optional
        Initial medium of each ray.  If this keyword is set to ``None`` and
        the ``optic`` keyword is set, then infer a value from
        ``optic.inMedium``.  If both this keyword and ``optic`` are
        ``None``, then use a default of vacuum.
    stopSurface : batoid.Interface, optional
        Surface defining the system stop.  If this keyword is set to
        ``None`` and the ``optic`` keyword is set, then infer a value from
        ``optic.stopSurface``.  If both this keyword and ``optic`` are
        ``None``, then use a default ``Interface(Plane())``, which is the
        global x-y plane.
    wavelength : float
        Vacuum wavelength of rays in meters.
    outer : float
        Outer radius of annulus in meters.
    inner : float, optional
        Inner radius of annulus in meters.  Default is 0.0.
    source : None or ndarray of float, shape (3,), optional
        Where rays originate.  If None, then rays originate an infinite
        distance away, in which case the ``dirCos`` kwarg must also be
        specified to set the direction of ray propagation.  If an ndarray,
        then the rays originate from this point in global coordinates and
        the ``dirCos`` kwarg is ignored.
    dirCos : ndarray of float, shape (3,), optional
        If source is None, then this indicates the initial direction of
        propagation of the rays.  If source is not None, then this is
        ignored.  Also see ``theta_x``, ``theta_y`` as an alternative to
        this keyword.
    theta_x, theta_y : float, optional
        Field angle in radians.  If source is None, then this indicates the
        initial direction of propagation of the rays.  If source is not
        None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
        to direction cosines.  Also see ``dirCos`` as an alternative to
        this keyword.
    projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
        Projection used to convert field angle to direction cosines.
    nrad : int
        Number of radii on which create rays.
    naz : int
        Approximate number of azimuthal angles uniformly spaced along the
        outermost ring.  Each ring is constrained to have a multiple of 6
        azimuths, so the realized value may be slightly different than
        the input value here.  Inner rings will have fewer azimuths in
        proportion to their radius, but will still be constrained to a
        multiple of 6.  (If the innermost ring has radius 0, then exactly
        1 ray, with azimuth undefined, will be used on that "ring".)
    flux : float, optional
        Flux to assign each ray.  Default is 1.0.
    nrandom : int, optional
        If not None, then uniformly sample this many rays from annular
        region instead of sampling on a hexapolar grid.
    """
    from .optic import Interface
    # Fix: Plane is needed for the default stopSurface below; the sibling
    # constructors (asSpokes, fromStop, fromFieldAngles) all import it
    # locally, while this method previously did not.
    from .surface import Plane
    if optic is not None:
        if backDist is None:
            backDist = optic.backDist
        if medium is None:
            medium = optic.inMedium
        if stopSurface is None:
            stopSurface = optic.stopSurface
        if outer is None:
            outer = optic.pupilSize/2
    if backDist is None:
        backDist = 40.0
    if stopSurface is None:
        stopSurface = Interface(Plane())
    if medium is None:
        medium = vacuum
    if dirCos is None and source is None:
        dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
    if wavelength is None:
        raise ValueError("Missing wavelength keyword")
    if nrandom is None:
        # Hexapolar grid: rings from outer to inner radius, each with a
        # multiple-of-6 number of azimuths proportional to its radius.
        nphis = []
        rhos = np.linspace(outer, inner, nrad)
        for rho in rhos:
            nphi = int((naz*rho/outer)//6)*6
            if nphi == 0:
                nphi = 6
            nphis.append(nphi)
        if inner == 0.0:
            # Central "ring" of radius 0 gets exactly one ray.
            nphis[-1] = 1
        th = np.empty(np.sum(nphis))
        rr = np.empty(np.sum(nphis))
        idx = 0
        for rho, nphi in zip(rhos, nphis):
            rr[idx:idx+nphi] = rho
            th[idx:idx+nphi] = np.linspace(0, 2*np.pi, nphi, endpoint=False)
            idx += nphi
        if inner == 0.0:
            rr[-1] = 0.0
            th[-1] = 0.0
    else:
        # Uniform random sampling over the annulus: radius ~ sqrt of a
        # uniform deviate in [inner^2, outer^2] gives uniform areal density.
        rr = np.sqrt(np.random.uniform(inner**2, outer**2, size=nrandom))
        th = np.random.uniform(0, 2*np.pi, size=nrandom)
    # x, y, z are views into the Fortran-ordered position array r; the
    # in-place assignments below fill r directly.
    r = np.empty((len(rr), 3), order='F')
    x = r[:, 0]
    y = r[:, 1]
    z = r[:, 2]
    x[:] = rr*np.cos(th)
    y[:] = rr*np.sin(th)
    del rr, th
    z[:] = stopSurface.surface.sag(x, y)
    transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
    applyForwardTransformArrays(transform, x, y, z)
    w = np.empty_like(x)
    w.fill(wavelength)
    n = medium.getN(wavelength)
    return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def asSpokes(
    cls,
    optic=None, backDist=None, medium=None, stopSurface=None,
    wavelength=None,
    outer=None, inner=0.0,
    source=None, dirCos=None,
    theta_x=None, theta_y=None, projection='postel',
    spokes=None, rings=None,
    spacing='uniform',
    flux=1
):
    """Create RayVector on an annular region using a spokes pattern.

    The function generates rays on a rings-and-spokes pattern, with a fixed
    number of radii for each azimuth and a fixed number of azimuths for
    each radius.  Its main use is for decomposing functions in pupil space
    into Zernike components using Gaussian Quadrature integration on
    annuli.  For more general purpose annular sampling, RayVector.asPolar()
    is often a better choice since it samples the pupil more uniformly.

    The algorithm used here starts by placing rays on the "stop" surface,
    and then backing them up such that they are in front of any surfaces of
    the optic they're intended to trace.

    The stop surface of most large telescopes is the plane perpendicular to
    the optic axis and flush with the rim of the primary mirror.  This
    plane is usually also the entrance pupil since there are no earlier
    refractive or reflective surfaces.  However, since this plane is a bit
    difficult to locate automatically, the default stop surface in batoid
    is the global x-y plane.

    If a telescope has a stopSurface attribute in its yaml file, then this
    is usually a good choice to use in this function.  Using a curved
    surface for the stop surface is allowed, but is usually a bad idea as
    this may lead to a non-uniformly illuminated pupil and is inconsistent
    with, say, an incoming uniform spherical wave or uniform plane wave.

    Parameters
    ----------
    optic : `batoid.Optic`, optional
        If present, then try to extract values for ``backDist``,
        ``medium``, ``stopSurface``, and ``outer`` from the Optic.  Note
        that values explicitly passed to `asSpokes` as keyword arguments
        override those extracted from ``optic``.
    backDist : float, optional
        Map rays backwards from the stop surface to the plane that is
        perpendicular to the ray and ``backDist`` meters from the point
        (0, 0, z(0,0)) on the stop surface.  This should generally be set
        large enough that any obscurations or phantom surfaces occurring
        before the stop surface are now "in front" of the ray.  If this
        keyword is set to ``None`` and the ``optic`` keyword is set, then
        infer a value from ``optic.backDist``.  If both this keyword and
        ``optic`` are ``None``, then use a default of 40 meters, which
        should be sufficiently large for foreseeable telescopes.
    medium : `batoid.Medium`, optional
        Initial medium of each ray.  If this keyword is set to ``None`` and
        the ``optic`` keyword is set, then infer a value from
        ``optic.inMedium``.  If both this keyword and ``optic`` are
        ``None``, then use a default of vacuum.
    stopSurface : batoid.Interface, optional
        Surface defining the system stop.  If this keyword is set to
        ``None`` and the ``optic`` keyword is set, then infer a value from
        ``optic.stopSurface``.  If both this keyword and ``optic`` are
        ``None``, then use a default ``Interface(Plane())``, which is the
        global x-y plane.
    wavelength : float
        Vacuum wavelength of rays in meters.
    outer : float
        Outer radius of annulus in meters.
    inner : float, optional
        Inner radius of annulus in meters.  Default is 0.0.
    source : None or ndarray of float, shape (3,), optional
        Where rays originate.  If None, then rays originate an infinite
        distance away, in which case the ``dirCos`` kwarg must also be
        specified to set the direction of ray propagation.  If an ndarray,
        then the rays originate from this point in global coordinates and
        the ``dirCos`` kwarg is ignored.
    dirCos : ndarray of float, shape (3,), optional
        If source is None, then this indicates the initial direction of
        propagation of the rays.  If source is not None, then this is
        ignored.  Also see ``theta_x``, ``theta_y`` as an alternative to
        this keyword.
    theta_x, theta_y : float, optional
        Field angle in radians.  If source is None, then this indicates the
        initial direction of propagation of the rays.  If source is not
        None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
        to direction cosines.  Also see ``dirCos`` as an alternative to
        this keyword.
    projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
        Projection used to convert field angle to direction cosines.
    spokes : int or ndarray of float
        If int, then number of spokes to use.
        If ndarray, then the values of the spokes azimuthal angles in
        radians.
    rings : int or ndarray of float
        If int, then number of rings to use.
        If array, then the values of the ring radii to use in meters.
    spacing : {'uniform', 'GQ'}
        If uniform, assign ring radii uniformly between ``inner`` and
        ``outer``.
        If GQ, then assign ring radii as the Gaussian Quadrature points
        for integration on an annulus.  In this case, the ray fluxes will
        be set to the Gaussian Quadrature weights (and the ``flux`` kwarg
        will be ignored).
    """
    from .optic import Interface
    from .surface import Plane
    if optic is not None:
        if backDist is None:
            backDist = optic.backDist
        if medium is None:
            medium = optic.inMedium
        if stopSurface is None:
            stopSurface = optic.stopSurface
        if outer is None:
            outer = optic.pupilSize/2
    if backDist is None:
        backDist = 40.0
    if stopSurface is None:
        stopSurface = Interface(Plane())
    if medium is None:
        medium = vacuum
    if dirCos is None and source is None:
        dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
    if wavelength is None:
        raise ValueError("Missing wavelength keyword")
    if isinstance(rings, Integral):
        if spacing == 'uniform':
            rings = np.linspace(inner, outer, rings)
        elif spacing == 'GQ':
            if spokes is None:
                # Enough azimuths to integrate Zernikes up to the ring count.
                spokes = 2*rings+1
            # Gauss-Legendre nodes Li on [-1, 1] and weights w; map nodes to
            # radii so that integration is uniform in r^2 over the annulus.
            Li, w = np.polynomial.legendre.leggauss(rings)
            eps = inner/outer
            area = np.pi*(1-eps**2)
            rings = np.sqrt(eps**2 + (1+Li)*(1-eps**2)/2)*outer
            # Ray fluxes become the quadrature weights (overrides ``flux``).
            flux = w*area/(2*spokes)
    if isinstance(spokes, Integral):
        spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
    rings, spokes = np.meshgrid(rings, spokes)
    flux = np.broadcast_to(flux, rings.shape)
    rings = rings.ravel()
    spokes = spokes.ravel()
    flux = flux.ravel()
    # x, y, z are views into the Fortran-ordered position array r; the
    # in-place assignments below fill r directly.
    r = np.empty((len(rings), 3), order='F')
    x = r[:, 0]
    y = r[:, 1]
    z = r[:, 2]
    x[:] = rings*np.cos(spokes)
    y[:] = rings*np.sin(spokes)
    del rings, spokes
    z[:] = stopSurface.surface.sag(x, y)
    transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
    applyForwardTransformArrays(transform, x, y, z)
    w = np.empty_like(x)
    w.fill(wavelength)
    n = medium.getN(wavelength)
    return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def _finish(cls, backDist, source, dirCos, n, r, w, flux):
    """Map rays backwards to their source position.

    Parameters
    ----------
    backDist : float
        Distance (meters) to back rays up along the propagation direction
        (parallel-ray case only).
    source : None or ndarray of float, shape (3,)
        If None, rays are parallel with direction ``dirCos``; otherwise all
        rays originate from this global-coordinate point.
    dirCos : ndarray of float, shape (3,)
        Direction cosines for the parallel-ray case (ignored otherwise).
    n : float
        Refractive index of the initial medium; ray speeds are 1/n in units
        of the vacuum speed of light.
    r : ndarray of float, shape (nray, 3)
        Ray positions on the stop surface; modified in place.
    w : ndarray of float, shape (nray,)
        Vacuum wavelengths in meters.
    flux : float or ndarray of float, shape (nray,)
        Ray fluxes; a scalar is broadcast to all rays.

    Returns
    -------
    RayVector
    """
    if isinstance(flux, Real):
        flux = np.full(len(r), float(flux))
    # Common per-ray bookkeeping arrays (identical in both branches).
    t = np.zeros(len(r), dtype=float)
    vignetted = np.zeros(len(r), dtype=bool)
    failed = np.zeros(len(r), dtype=bool)
    if source is None:
        from ._batoid import finishParallel
        # Unit direction scaled to speed 1/n (speed of light in the medium).
        vv = np.array(dirCos, dtype=float)
        vv /= n*np.sqrt(np.dot(vv, vv))
        # Build an orthonormal frame with zhat opposite the propagation
        # direction; rays are backed up to the plane at zhat*backDist.
        zhat = -n*vv
        xhat = np.cross(np.array([1.0, 0.0, 0.0]), zhat)
        xhat /= np.sqrt(np.dot(xhat, xhat))
        yhat = np.cross(xhat, zhat)
        origin = zhat*backDist
        rot = np.stack([xhat, yhat, zhat]).T
        finishParallel(origin, rot.ravel(), vv, r.ctypes.data, len(r))
        v = np.full_like(r, vv)
        return RayVector._directInit(
            r, v, t, w, flux, vignetted, failed, globalCoordSys
        )
    else:
        v = np.copy(r)
        v -= source
        # Normalize each ray direction to magnitude 1/n.
        # Fix: the previous expression, n*np.einsum('ab,ab->b', v, v),
        # summed over the *ray* axis (yielding a length-3 vector) and
        # omitted the square root, producing mis-scaled velocities.
        v /= (n*np.sqrt(np.einsum('ab,ab->a', v, v)))[:, None]
        r[:] = source
        return RayVector._directInit(
            r, v, t, w, flux, vignetted, failed, globalCoordSys
        )
@classmethod
def fromStop(
    cls, x, y,
    optic=None, backDist=None, medium=None, stopSurface=None,
    wavelength=None,
    source=None, dirCos=None,
    theta_x=None, theta_y=None, projection='postel',
    flux=1
):
    """Create rays that intersects the "stop" surface at given points.

    The algorithm used here starts by placing the rays on the "stop"
    surface, and then backing them up such that they are in front of any
    surfaces of the optic they're intended to trace.

    The stop surface of most large telescopes is the plane perpendicular to
    the optic axis and flush with the rim of the primary mirror.  This
    plane is usually also the entrance pupil since there are no earlier
    refractive or reflective surfaces.  However, since this plane is a bit
    difficult to locate automatically, the default stop surface in batoid
    is the global x-y plane.

    If a telescope has a stopSurface attribute in its yaml file, then this
    is usually a good choice to use in this function.  Using a curved
    surface for the stop surface is allowed, but is usually a bad idea as
    this may lead to a non-uniformly illuminated pupil and is inconsistent
    with, say, an incoming uniform spherical wave or uniform plane wave.

    Parameters
    ----------
    x, y : ndarray
        X/Y coordinates on the stop surface where the rays would intersect
        if not refracted or reflected first.
    optic : `batoid.Optic`, optional
        If present, then try to extract values for ``backDist``,
        ``medium``, and ``stopSurface`` from the Optic.  Note that values
        explicitly passed here as keyword arguments override those
        extracted from ``optic``.
    backDist : float, optional
        Map rays backwards from the stop surface to the plane that is
        perpendicular to the rays and ``backDist`` meters from the point
        (0, 0, z(0,0)) on the stop surface.  This should generally be set
        large enough that any obscurations or phantom surfaces occurring
        before the stop surface are now "in front" of the ray.  If this
        keyword is set to ``None`` and the ``optic`` keyword is set, then
        infer a value from ``optic.backDist``.  If both this keyword and
        ``optic`` are ``None``, then use a default of 40 meters, which
        should be sufficiently large for foreseeable telescopes.
    medium : `batoid.Medium`, optional
        Initial medium of rays.  If this keyword is set to ``None`` and
        the ``optic`` keyword is set, then infer a value from
        ``optic.inMedium``.  If both this keyword and ``optic`` are
        ``None``, then use a default of vacuum.
    stopSurface : batoid.Interface, optional
        Surface defining the system stop.  If this keyword is set to
        ``None`` and the ``optic`` keyword is set, then infer a value from
        ``optic.stopSurface``.  If both this keyword and ``optic`` are
        ``None``, then use a default ``Interface(Plane())``, which is the
        global x-y plane.
    wavelength : float
        Vacuum wavelength of rays in meters.
    source : None or ndarray of float, shape (3,), optional
        Where the rays originate.  If None, then the rays originate an
        infinite distance away, in which case the ``dirCos`` kwarg must also
        be specified to set the direction of ray propagation.  If an
        ndarray, then the rays originates from this point in global
        coordinates and the ``dirCos`` kwarg is ignored.
    dirCos : ndarray of float, shape (3,), optional
        If source is None, then indicates the direction of ray propagation.
        If source is not None, then this is ignored.
    theta_x, theta_y : float, optional
        Field angle in radians.  If source is None, then this indicates the
        initial direction of propagation of the rays.  If source is not
        None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
        to direction cosines.  Also see ``dirCos`` as an alternative to
        this keyword.
    projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
        Projection used to convert field angle to direction cosines.
    flux : float, optional
        Flux of rays.  Default is 1.0.
    """
    from .optic import Interface
    from .surface import Plane
    if optic is not None:
        if backDist is None:
            backDist = optic.backDist
        if medium is None:
            medium = optic.inMedium
        if stopSurface is None:
            stopSurface = optic.stopSurface
    if backDist is None:
        backDist = 40.0
    if stopSurface is None:
        stopSurface = Interface(Plane())
    if medium is None:
        medium = vacuum
    if dirCos is None and source is None:
        dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
    if wavelength is None:
        raise ValueError("Missing wavelength keyword")
    # Promote scalar x/y inputs to 1-d arrays.
    xx = np.atleast_1d(x)
    yy = np.atleast_1d(y)
    # x, y, z below are views into the Fortran-ordered position array r;
    # the in-place assignments fill r directly.  (This rebinds the x/y
    # parameter names intentionally.)
    r = np.empty((len(xx), 3), order='F')
    x = r[:, 0]
    y = r[:, 1]
    z = r[:, 2]
    x[:] = xx
    y[:] = yy
    z[:] = stopSurface.surface.sag(x, y)
    transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
    applyForwardTransformArrays(transform, x, y, z)
    w = np.empty_like(x)
    w.fill(wavelength)
    n = medium.getN(wavelength)
    return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def fromFieldAngles(
    cls, theta_x, theta_y, projection='postel',
    optic=None, backDist=None, medium=None, stopSurface=None,
    wavelength=None,
    x=0, y=0,
    flux=1
):
    """Create RayVector with one stop surface point but many field angles.

    This method is similar to `fromStop` but broadcasts over ``theta_x``
    and ``theta_y`` instead of over ``x`` and ``y``.  There is currently
    less effort paid to synchronizing the ``t`` values of the created rays,
    as they don't correspond to points on a physical incoming wavefront in
    this case.  The primary intended use case is to map chief rays
    (``x``=``y``=0) from incoming field angle to focal plane position.

    Parameters
    ----------
    theta_x, theta_y : ndarray
        Field angles in radians.
    projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
        Projection used to convert field angle to direction cosines.
    optic : `batoid.Optic`, optional
        If present, then try to extract values for ``backDist``,
        ``medium``, and ``stopSurface`` from the Optic.  Note that values
        explicitly passed here as keyword arguments override those
        extracted from ``optic``.
    backDist : float, optional
        Map rays backwards from the stop surface this far.  This should
        generally be set large enough that any obscurations or phantom
        surfaces occurring before the stop surface are now "in front" of the
        rays.  If this keyword is set to ``None`` and the ``optic`` keyword
        is set, then infer a value from ``optic.backDist``.  If both this
        keyword and ``optic`` are ``None``, then use a default of 40 meters,
        which should be sufficiently large for foreseeable telescopes.
    medium : `batoid.Medium`, optional
        Initial medium of rays.  If this keyword is set to ``None`` and
        the ``optic`` keyword is set, then infer a value from
        ``optic.inMedium``.  If both this keyword and ``optic`` are
        ``None``, then use a default of vacuum.
    stopSurface : batoid.Interface, optional
        Surface defining the system stop.  If this keyword is set to
        ``None`` and the ``optic`` keyword is set, then infer a value from
        ``optic.stopSurface``.  If both this keyword and ``optic`` are
        ``None``, then use a default ``Interface(Plane())``, which is the
        global x-y plane.
    wavelength : float
        Vacuum wavelength of rays in meters.
    x, y : float
        X/Y coordinates on the stop surface where the rays would intersect
        if not refracted or reflected first.
    flux : float, optional
        Flux of rays.  Default is 1.0.
    """
    from .optic import Interface
    from .surface import Plane
    if optic is not None:
        if backDist is None:
            backDist = optic.backDist
        if medium is None:
            medium = optic.inMedium
        if stopSurface is None:
            stopSurface = optic.stopSurface
    if backDist is None:
        backDist = 40.0
    if stopSurface is None:
        stopSurface = Interface(Plane())
    if medium is None:
        medium = vacuum
    if wavelength is None:
        raise ValueError("Missing wavelength keyword")
    # Direction cosines for each field angle, scaled to speed 1/n.
    vx, vy, vz = fieldToDirCos(theta_x, theta_y, projection=projection)
    n = medium.getN(wavelength)
    vx /= n
    vy /= n
    vz /= n
    # Single stop-surface point, broadcast to match the field-angle arrays.
    z = stopSurface.surface.sag(x, y)
    x = np.full_like(vx, x)
    y = np.full_like(vx, y)
    z = np.full_like(vx, z)
    t = np.zeros_like(vx)
    rv = RayVector(
        x, y, z,
        vx, vy, vz,
        t, wavelength, flux,
        coordSys=stopSurface.coordSys
    )
    # Propagating by dt moves |v|*dt = dt/n meters, so dt = -backDist*n
    # backs each ray up by backDist meters.
    rv.propagate(-backDist*n)
    return rv
@property
def r(self):
    """ndarray of float, shape (n, 3): Positions of rays in meters."""
    # Pull any device-side updates back into the host buffer before reading.
    self._rv.r.syncToHost()
    return self._r
@property
def x(self):
    """ndarray of float, shape (n,): The x components of ray positions in meters."""
    # Pull any device-side updates back into the host buffer before reading.
    self._rv.r.syncToHost()
    return self._r[:, 0]
@property
def y(self):
    """ndarray of float, shape (n,): The y components of ray positions in meters."""
    # Pull any device-side updates back into the host buffer before reading.
    self._rv.r.syncToHost()
    return self._r[:, 1]
@property
def z(self):
    """ndarray of float, shape (n,): The z components of ray positions in meters."""
    # Pull any device-side updates back into the host buffer before reading.
    self._rv.r.syncToHost()
    return self._r[:, 2]
@property
def v(self):
    """ndarray of float, shape (n, 3): Velocities of rays in units of the
    speed of light in vacuum.  Note that these may have magnitudes < 1 if
    the rays are inside a refractive medium.
    """
    # Pull any device-side updates back into the host buffer before reading.
    self._rv.v.syncToHost()
    return self._v
@property
def vx(self):
    """The x components of ray velocities in units of the vacuum speed of
    light.
    """
    self._rv.v.syncToHost()
    return self._v[:, 0]
@property
def vy(self):
    """The y components of ray velocities in units of the vacuum speed of
    light.
    """
    self._rv.v.syncToHost()
    return self._v[:, 1]
@property
def vz(self):
    """The z components of ray velocities in units of the vacuum speed of
    light.
    """
    self._rv.v.syncToHost()
    return self._v[:, 2]
@property
def t(self):
    """Reference times (divided by the speed of light in vacuum) in units
    of meters, also known as the optical path lengths.
    """
    self._rv.t.syncToHost()
    return self._t
@property
def wavelength(self):
    """ndarray of float, shape (n,): Vacuum wavelengths in meters."""
    # wavelength is constant, so no need to synchronize
    return self._wavelength
@property
def flux(self):
    """ndarray of float, shape (n,): Fluxes in arbitrary units."""
    self._rv.flux.syncToHost()
    return self._flux
@property
def vignetted(self):
    """ndarray of bool, shape (n,): True for rays that have been vignetted."""
    self._rv.vignetted.syncToHost()
    return self._vignetted
@property
def failed(self):
    """True for rays that have failed.  This may occur, for example, if
    batoid failed to find the intersection of a ray with a surface.
    """
    self._rv.failed.syncToHost()
    return self._failed
@property
def k(self):
    r"""ndarray of float, shape (n, 3): Wavevectors of plane waves in units
    of radians per meter.  The magnitude of each wavevector is equal to
    :math:`2 \pi n / \lambda`, where :math:`n` is the refractive index and
    :math:`\lambda` is the wavelength.
    """
    # k = 2*pi * v / (lambda * |v|^2); since |v| = 1/n this has the
    # advertised magnitude 2*pi*n/lambda.
    vel = self.v
    kvec = 2*np.pi*np.array(vel)
    kvec /= self.wavelength[:, None]
    kvec /= np.sum(vel*vel, axis=-1)[:, None]
    return kvec
@property
def kx(self):
    """The x component of each ray wavevector in radians per meter."""
    # Note: each access recomputes the full wavevector array via `k`.
    return self.k[:,0]
@property
def ky(self):
    """The y component of each ray wavevector in radians per meter."""
    # Note: each access recomputes the full wavevector array via `k`.
    return self.k[:,1]
@property
def kz(self):
    """The z component of each ray wavevector in radians per meter."""
    # Note: each access recomputes the full wavevector array via `k`.
    return self.k[:,2]
@property
def omega(self):
    r"""The temporal angular frequency of each plane wave divided by the
    vacuum speed of light in units of radians per meter.  Equals
    :math:`2 \pi / \lambda`.
    """
    return 2*np.pi/self.wavelength
@lazy_property
def _rv(self):
    """Lazily-constructed C++ RayVector wrapper around the host arrays.

    The wrapper receives raw pointers into the numpy buffers, so
    host<->device synchronization (see ``_syncToHost``/``_syncToDevice``)
    operates on these arrays in place.
    """
    return _batoid.CPPRayVector(
        self._r.ctypes.data, self._v.ctypes.data, self._t.ctypes.data,
        self._wavelength.ctypes.data, self._flux.ctypes.data,
        self._vignetted.ctypes.data, self._failed.ctypes.data,
        len(self._wavelength)
    )
def _syncToHost(self):
    """Copy all device-side arrays back into the host-side numpy buffers."""
    if "_rv" not in self.__dict__:
        # Was never copied to device, so still synchronized.
        return
    self._rv.r.syncToHost()
    self._rv.v.syncToHost()
    self._rv.t.syncToHost()
    self._rv.wavelength.syncToHost()
    self._rv.flux.syncToHost()
    self._rv.vignetted.syncToHost()
    self._rv.failed.syncToHost()
def _syncToDevice(self):
    """Push all host-side numpy buffers to the device.

    Note: unlike ``_syncToHost``, accessing ``self._rv`` here materializes
    the lazy C++ wrapper if it does not exist yet.
    """
    self._rv.r.syncToDevice()
    self._rv.v.syncToDevice()
    self._rv.t.syncToDevice()
    self._rv.wavelength.syncToDevice()
    self._rv.flux.syncToDevice()
    self._rv.vignetted.syncToDevice()
    self._rv.failed.syncToDevice()
def copy(self):
    """Return a copy of this RayVector.

    All per-ray arrays and the coordinate system are copied; the C++/device
    wrapper is not copied and will be lazily re-created on the new object.
    """
    # copy on host side for now...
    self._syncToHost()
    ret = RayVector.__new__(RayVector)
    ret._r = np.copy(self._r, order='F')
    ret._v = np.copy(self._v, order='F')
    ret._t = np.copy(self._t)
    ret._wavelength = np.copy(self._wavelength)
    ret._flux = np.copy(self._flux)
    ret._vignetted = np.copy(self._vignetted)
    ret._failed = np.copy(self._failed)
    ret.coordSys = self.coordSys.copy()
    return ret
def toCoordSys(self, coordSys):
    """Transform this RayVector into a new coordinate system.

    Parameters
    ----------
    coordSys: batoid.CoordSys
        Destination coordinate system.

    Returns
    -------
    RayVector
        Reference to self, no copy is made.
    """
    # Mutates this RayVector in place via applyForwardTransform.
    transform = CoordTransform(self.coordSys, coordSys)
    applyForwardTransform(transform, self)
    return self
def __len__(self):
    """Number of rays in this RayVector."""
    return self._t.size
def __eq__(self, rhs):
    """Equality, delegated to the C++ layer.

    NOTE(review): a non-RayVector ``rhs`` raises AttributeError (missing
    ``_rv``) rather than returning NotImplemented.
    """
    return self._rv == rhs._rv
def __ne__(self, rhs):
    """Inequality, delegated to the C++ layer (see ``__eq__``)."""
    return self._rv != rhs._rv
def __repr__(self):
    """Return an eval-style representation listing all per-ray arrays."""
    fields = (
        self.x, self.y, self.z,
        self.vx, self.vy, self.vz,
        self.t, self.wavelength, self.flux,
        self.vignetted, self.failed, self.coordSys,
    )
    return "RayVector(" + ", ".join(repr(f) for f in fields) + ")"
def __getstate__(self):
    """Pickle support: return host-synchronized arrays plus coordSys.

    Uses the public properties, which sync each array back from the device
    before it is captured.
    """
    return (
        self.r, self.v, self.t,
        self.wavelength, self.flux,
        self.vignetted, self.failed, self.coordSys
    )
def __setstate__(self, args):
    """Pickle support: restore arrays; the C++ wrapper is re-created lazily."""
    (self._r, self._v, self._t,
     self._wavelength, self._flux, self._vignetted,
     self._failed, self.coordSys) = args
def __getitem__(self, idx):
    """Return a copy of the indexed ray(s).

    Parameters
    ----------
    idx : int or slice
        Integer index (possibly negative) or slice into the rays.

    Returns
    -------
    RayVector
        A copy; modifying it does not affect this RayVector.

    Raises
    ------
    IndexError
        If an integer index is out of bounds.
    """
    if isinstance(idx, int):
        if idx >= 0:
            if idx >= self._rv.t.size:
                msg = "index {} is out of bounds for axis 0 with size {}"
                msg = msg.format(idx, self._rv.t.size)
                raise IndexError(msg)
            idx = slice(idx, idx+1)
        else:
            if idx < -self._rv.t.size:
                msg = "index {} is out of bounds for axis 0 with size {}"
                msg = msg.format(idx, self._rv.t.size)
                raise IndexError(msg)
            # Convert a negative index into the equivalent single-element
            # slice.  Fix: the previous stop value, size - idx + 1, grows
            # with |idx| and returned multiple rays for idx <= -2.
            idx = slice(self._rv.t.size+idx, self._rv.t.size+idx+1)
    self._syncToHost()
    return RayVector._directInit(
        np.copy(self._r[idx], order='F'),
        np.copy(self._v[idx], order='F'),
        np.copy(self._t[idx]),
        np.copy(self._wavelength[idx]),
        np.copy(self._flux[idx]),
        np.copy(self._vignetted[idx]),
        np.copy(self._failed[idx]),
        self.coordSys
    )
def concatenateRayVectors(rvs):
    """Concatenate a sequence of RayVectors into a single RayVector.

    Parameters
    ----------
    rvs : sequence of RayVector
        RayVectors to concatenate.  Must be non-empty; the result adopts
        the coordinate system of ``rvs[0]``.  NOTE(review): the inputs'
        coordinate systems are not checked for consistency — presumably
        callers supply rays already in a common frame; confirm before
        relying on this.

    Returns
    -------
    RayVector
    """
    return RayVector(
        np.hstack([rv.x for rv in rvs]),
        np.hstack([rv.y for rv in rvs]),
        np.hstack([rv.z for rv in rvs]),
        np.hstack([rv.vx for rv in rvs]),
        np.hstack([rv.vy for rv in rvs]),
        np.hstack([rv.vz for rv in rvs]),
        np.hstack([rv.t for rv in rvs]),
        np.hstack([rv.wavelength for rv in rvs]),
        np.hstack([rv.flux for rv in rvs]),
        np.hstack([rv.vignetted for rv in rvs]),
        np.hstack([rv.failed for rv in rvs]),
        rvs[0].coordSys
    )
| 5,133 | 0 | 394 |
9ee4c51c283ca08731444b4a226affca37db5cb6 | 392 | py | Python | infinitd_server/handler/battleground_state.py | rhofour/InfiniTDBackend | 8763d64a82d02e4282abff5419e1ab256af41d7e | [
"MIT"
] | null | null | null | infinitd_server/handler/battleground_state.py | rhofour/InfiniTDBackend | 8763d64a82d02e4282abff5419e1ab256af41d7e | [
"MIT"
] | null | null | null | infinitd_server/handler/battleground_state.py | rhofour/InfiniTDBackend | 8763d64a82d02e4282abff5419e1ab256af41d7e | [
"MIT"
] | null | null | null | from infinitd_server.db import Db
from infinitd_server.sse import SseQueues
from infinitd_server.handler.sse import SseStreamHandler
| 26.133333 | 56 | 0.737245 | from infinitd_server.db import Db
from infinitd_server.sse import SseQueues
from infinitd_server.handler.sse import SseStreamHandler
class BattlegroundStateHandler(SseStreamHandler):
    """SSE stream handler serving a user's battleground state."""
    db: Db  # database accessor used to fetch battleground state
    queues: SseQueues  # SSE queues consumed by the SseStreamHandler base

    def initialize(self, db, queues):
        """Stash shared Db and SseQueues (presumably Tornado's
        RequestHandler.initialize hook — confirm against the base class)."""
        self.db = db
        self.queues = queues

    async def initialState(self, name):
        """Return the initial battleground state for user ``name``."""
        return self.db.getBattleground(name)
| 121 | 115 | 23 |
e0103226cbd1c63257838870dd3a503fbf50d375 | 186 | py | Python | close_crawl/modules/__init__.py | kug3lblitz/maryland-foreclosure-scraper | d1b8851efb4f82bc305f5d23c079b1f83ef73be4 | [
"MIT"
] | 6 | 2018-04-24T05:35:38.000Z | 2021-04-03T23:53:24.000Z | close_crawl/modules/__init__.py | kug3lblitz/maryland-foreclosure-scraper | d1b8851efb4f82bc305f5d23c079b1f83ef73be4 | [
"MIT"
] | 22 | 2016-12-27T16:58:01.000Z | 2017-04-14T15:15:46.000Z | close_crawl/modules/__init__.py | kug3lblitz/maryland-foreclosure-scraper | d1b8851efb4f82bc305f5d23c079b1f83ef73be4 | [
"MIT"
] | 4 | 2016-12-22T22:07:18.000Z | 2020-01-03T04:31:08.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""modules
"""
import __builtin__
# Python 2 compatibility shim: rebind the builtins so that range behaves
# like the lazy xrange and input behaves like raw_input (no eval of the
# typed text), mimicking Python 3 semantics module-wide.
del __builtin__.range
__builtin__.range = xrange
del __builtin__.input
__builtin__.input = raw_input
| 12.4 | 29 | 0.72043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""modules
"""
import __builtin__
# Python 2 compatibility shim: rebind the builtins so that range behaves
# like the lazy xrange and input behaves like raw_input (no eval of the
# typed text), mimicking Python 3 semantics module-wide.
del __builtin__.range
__builtin__.range = xrange
del __builtin__.input
__builtin__.input = raw_input
| 0 | 0 | 0 |
e1d31e54e7143223c0acc8da71319bbe91e00772 | 442 | py | Python | libsaas/services/basecamp/comments.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 155 | 2015-01-27T15:17:59.000Z | 2022-02-20T00:14:08.000Z | libsaas/services/basecamp/comments.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 14 | 2015-01-12T08:22:37.000Z | 2021-06-16T19:49:31.000Z | libsaas/services/basecamp/comments.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 43 | 2015-01-28T22:41:45.000Z | 2021-09-21T04:44:26.000Z | from libsaas.services import base
from .resource import BasecampResource
| 19.217391 | 40 | 0.701357 | from libsaas.services import base
from .resource import BasecampResource
class CommentResource(BasecampResource):
path = 'comments'
class Comments(CommentResource):
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
class Comment(CommentResource):
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
| 153 | 62 | 150 |
731701cb8c9d654fd7bbc4b44d75644b800e095a | 1,019 | py | Python | applications/urls.py | tiferrei/PEDASI | b819aee93de99c00a1aa3eb9d32102b89f72459e | [
"MIT"
] | null | null | null | applications/urls.py | tiferrei/PEDASI | b819aee93de99c00a1aa3eb9d32102b89f72459e | [
"MIT"
] | 18 | 2019-02-27T12:39:27.000Z | 2021-03-24T16:32:47.000Z | applications/urls.py | Southampton-RSG/PEDASI-IoT | 25a111ac7cf4b23fee50ad8eac6ea21564954859 | [
"MIT"
] | 1 | 2021-02-16T17:47:15.000Z | 2021-02-16T17:47:15.000Z | from django.urls import path
from . import views
app_name = 'applications'
# URL routes for the applications app: CRUD views plus token and
# access-management endpoints, all keyed by application primary key.
urlpatterns = [
    path('',
         views.ApplicationListView.as_view(),
         name='application.list'),
    path('add',
         views.ApplicationCreateView.as_view(),
         name='application.add'),
    path('<int:pk>/',
         views.ApplicationDetailView.as_view(),
         name='application.detail'),
    path('<int:pk>/edit',
         views.ApplicationUpdateView.as_view(),
         name='application.edit'),
    path('<int:pk>/delete',
         views.ApplicationDeleteView.as_view(),
         name='application.delete'),
    path('<int:pk>/token',
         views.ApplicationManageTokenView.as_view(),
         name='token'),
    path('<int:pk>/manage-access',
         views.ApplicationManageAccessView.as_view(),
         name='application.manage-access'),
    path('<int:pk>/manage-access/users/<int:user_pk>',
         views.ApplicationManageAccessView.as_view(),
         name='application.manage-access.user'),
]
| 25.475 | 54 | 0.621197 | from django.urls import path
from . import views
app_name = 'applications'
urlpatterns = [
path('',
views.ApplicationListView.as_view(),
name='application.list'),
path('add',
views.ApplicationCreateView.as_view(),
name='application.add'),
path('<int:pk>/',
views.ApplicationDetailView.as_view(),
name='application.detail'),
path('<int:pk>/edit',
views.ApplicationUpdateView.as_view(),
name='application.edit'),
path('<int:pk>/delete',
views.ApplicationDeleteView.as_view(),
name='application.delete'),
path('<int:pk>/token',
views.ApplicationManageTokenView.as_view(),
name='token'),
path('<int:pk>/manage-access',
views.ApplicationManageAccessView.as_view(),
name='application.manage-access'),
path('<int:pk>/manage-access/users/<int:user_pk>',
views.ApplicationManageAccessView.as_view(),
name='application.manage-access.user'),
]
| 0 | 0 | 0 |
b174c8698e60a85225a80fede25eadaaed2b1265 | 165 | py | Python | naoqi_sensors_py/nodes/camera.py | chaotic-bruno/naoqi_bridge | 7543857da53e845c34d5336565a2890609ff3809 | [
"BSD-3-Clause"
] | 31 | 2015-02-14T15:56:44.000Z | 2022-01-13T06:38:13.000Z | naoqi_sensors_py/nodes/camera.py | chaotic-bruno/naoqi_bridge | 7543857da53e845c34d5336565a2890609ff3809 | [
"BSD-3-Clause"
] | 59 | 2015-01-22T14:23:32.000Z | 2020-04-13T11:51:00.000Z | naoqi_sensors_py/nodes/camera.py | chaotic-bruno/naoqi_bridge | 7543857da53e845c34d5336565a2890609ff3809 | [
"BSD-3-Clause"
] | 37 | 2015-01-22T22:18:25.000Z | 2021-07-17T06:49:04.000Z | #!/usr/bin/env python
import rospy
from naoqi_sensors.naoqi_camera import NaoqiCam
if __name__ == "__main__":
naocam = NaoqiCam()
naocam.start()
rospy.spin()
| 18.333333 | 47 | 0.733333 | #!/usr/bin/env python
import rospy
from naoqi_sensors.naoqi_camera import NaoqiCam
if __name__ == "__main__":
naocam = NaoqiCam()
naocam.start()
rospy.spin()
| 0 | 0 | 0 |
f2f4c9c9bcb13cbdb45a478a7f967cf4d6edf375 | 11,748 | py | Python | billiards/baidubce/utils.py | zxkane/billiards | 809a37b111a0fdbf7a2b1176149256b93c43045f | [
"Apache-1.1"
] | null | null | null | billiards/baidubce/utils.py | zxkane/billiards | 809a37b111a0fdbf7a2b1176149256b93c43045f | [
"Apache-1.1"
] | null | null | null | billiards/baidubce/utils.py | zxkane/billiards | 809a37b111a0fdbf7a2b1176149256b93c43045f | [
"Apache-1.1"
] | 1 | 2021-02-08T13:19:34.000Z | 2021-02-08T13:19:34.000Z | # Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provide some tools for bce client.
"""
import StringIO
import cStringIO
import os
import re
import datetime
import hashlib
import base64
import string
import baidubce
from baidubce.http import http_headers
def get_md5_from_fp(fp, offset=0, length=-1, buf_size=8192):
    """
    Compute the MD5 digest of (part of) an open file, base64-encoded.

    The file position is restored to what it was before the call.

    :type fp: FileIO
    :param fp: file-like object opened for binary reading
    :type offset: long
    :param offset: position to start hashing from (0 keeps the current start)
    :type length: long
    :param length: number of bytes to hash; non-positive means read to EOF
    :param buf_size: chunk size used while reading
    :return: standard base64 encoding of the MD5 digest
    """
    saved_pos = fp.tell()
    if offset:
        fp.seek(offset)
    digest = hashlib.md5()
    remaining = length
    while True:
        # Never read past the requested byte count.
        chunk_size = buf_size
        if chunk_size > remaining > 0:
            chunk_size = remaining
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        digest.update(chunk)
        if remaining > 0:
            remaining -= len(chunk)
            if remaining == 0:
                break
    fp.seek(saved_pos)
    return base64.standard_b64encode(digest.digest())
def get_canonical_time(timestamp=0):
    """
    Format a POSIX timestamp as a canonical UTC string "YYYY-MM-DDTHH:MM:SSZ".

    :type timestamp: int
    :param timestamp: seconds since the epoch; 0 means "now"
    :return: canonical UTC time string
    :rtype: str
    """
    if timestamp:
        moment = datetime.datetime.utcfromtimestamp(timestamp)
    else:
        moment = datetime.datetime.utcnow()
    return moment.strftime('%Y-%m-%dT%H:%M:%SZ')
def is_ip(s):
    """
    Check whether a string is a legal IPv4 address; an optional ":port"
    suffix is ignored and the literal "localhost" is accepted.

    :type s: string
    :param s: candidate address, e.g. "192.168.0.1" or "127.0.0.1:80"
    :return: True when the host part is "localhost" or a dotted quad whose
        four components are integers in [0, 255]
    :rtype: bool
    """
    try:
        host = s.split(':')[0]
        if host == 'localhost':
            return True
        parts = host.split('.')
        if len(parts) != 4:
            return False
        for part in parts:
            value = int(part)
            if value < 0 or value > 255:
                return False
    # Catch only what this code can actually raise (non-numeric components,
    # or a non-string argument). The original bare ``except:`` also swallowed
    # KeyboardInterrupt/SystemExit, which hid real problems.
    except (ValueError, TypeError, AttributeError):
        return False
    return True
def convert_to_standard_string(input_string):
    """
    Encode a value to a UTF-8 byte string.

    :type input_string: string
    :param input_string: text (or arbitrary value) to convert
    :return: UTF-8 encoded str for unicode input, otherwise str(input_string)
    :rtype: str
    """
    # NOTE(review): ``unicode`` exists only on Python 2; on Python 3 this
    # raises NameError, so this module is Python-2 only as written.
    if isinstance(input_string, unicode):
        return input_string.encode(baidubce.DEFAULT_ENCODING)
    else:
        return str(input_string)
def convert_header2map(header_list):
    """
    Convert a list of (name, value) header pairs into a dict, stripping
    surrounding double quotes from any string name or value.

    :type header_list: list
    :param header_list: iterable of 2-tuples
    :return: dict mapping unquoted names to unquoted values
    :rtype: dict
    """
    def _unquote(item):
        return item.strip('"') if isinstance(item, str) else item

    return {_unquote(name): _unquote(value) for name, value in header_list}
def safe_get_element(name, container):
    """
    Case-insensitive, whitespace-insensitive lookup of ``name`` in a dict.

    :type name: string
    :param name: key to look for (compared stripped and lower-cased)
    :type container: dict
    :param container: mapping whose keys are compared the same way
    :return: the matching value, or "" when no key matches
    """
    target = name.strip().lower()
    for key, value in container.items():
        if key.strip().lower() == target:
            return value
    return ""
def check_redirect(res):
    """
    Check whether a response object carries an HTTP redirect status.

    :type res: HttpResponse
    :param res: object expected to expose a ``status`` attribute
    :return: True when res.status is 301 or 302, otherwise False (including
        when the attribute is missing or raises)
    :rtype: bool
    """
    try:
        return res.status in (301, 302)
    except:
        return False
_NORMALIZED_CHAR_LIST = _get_normalized_char_list()
def normalize_string(in_str, encoding_slash=True):
    """
    Percent-encode ``in_str`` using the precomputed per-byte table.

    When encoding_slash is False, '/' characters are left unescaped
    (used when encoding URI path components); otherwise every
    non-unreserved byte is escaped as %XX.

    :type in_str: string
    :param in_str: value to encode (converted to a byte string first)
    :type encoding_slash: Bool
    :param encoding_slash: escape '/' as %2F when True
    :return: percent-encoded string
    :rtype: str
    """
    tmp = []
    for ch in convert_to_standard_string(in_str):
        if ch == '/' and not encoding_slash:
            tmp.append('/')
        else:
            tmp.append(_NORMALIZED_CHAR_LIST[ord(ch)])
    return ''.join(tmp)
def append_uri(base_uri, *path_components):
    """
    Append path_components to the end of base_uri in order, and ignore all
    empty strings and None. Each component is percent-encoded (slashes are
    preserved) and redundant '/' separators are trimmed before joining.

    :param base_uri: None
    :type base_uri: string
    :param path_components: None
    :return: the final url
    :rtype: str
    """
    tmp = [base_uri]
    for path in path_components:
        if path:
            tmp.append(normalize_string(path, False))
    if len(tmp) > 1:
        tmp[0] = tmp[0].rstrip('/')
        tmp[-1] = tmp[-1].lstrip('/')
    # Interior components get '/' stripped from both ends so join() emits
    # exactly one separator between pieces.
    for i in range(1, len(tmp)):
        tmp[i] = tmp[i].strip('/')
    return '/'.join(tmp)
def check_bucket_valid(bucket):
    """
    Validate a bucket name: 3-63 characters drawn from lowercase letters,
    digits and '-', starting with a letter or digit and not ending with
    '-' or '_'.

    :type bucket: string
    :param bucket: candidate bucket name
    :return: True when the name satisfies every rule
    :rtype: bool
    """
    allowed = set("abcdefghijklmnopqrstuvwxyz0123456789-")
    if not (3 <= len(bucket) <= 63):
        return False
    if bucket[-1] in ('-', '_'):
        return False
    first = bucket[0]
    if not (('a' <= first <= 'z') or ('0' <= first <= '9')):
        return False
    return all(ch in allowed for ch in bucket)
def guess_content_type_by_file_name(file_name):
    """
    Guess a MIME type from a file name's extension.

    JavaScript and Office Open XML extensions use a built-in override
    table; anything else is looked up in the ``mimetypes`` registry.
    Unknown extensions, or any lookup failure, fall back to
    'application/octet-stream'.

    :type file_name: string
    :param file_name: file name or path whose extension to inspect
    :return: MIME type string
    """
    overrides = {
        "js": "application/javascript",
        "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
        "potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
        "ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
        "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        "sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
        "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
        "xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
        "xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
    }
    try:
        suffix = os.path.basename(file_name).split('.')[-1]
        mime_type = overrides.get(suffix)
        if mime_type is None:
            import mimetypes
            mimetypes.init()
            mime_type = mimetypes.types_map["." + suffix]
    except:
        mime_type = 'application/octet-stream'
    if not mime_type:
        mime_type = 'application/octet-stream'
    return mime_type
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]{2,})')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')


def pythonize_name(name):
    """Convert camel case to a "pythonic" name.
    Examples::
        pythonize_name('CamelCase') -> 'camel_case'
        pythonize_name('already_pythonized') -> 'already_pythonized'
        pythonize_name('HTTPRequest') -> 'http_request'
        pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok'
        pythonize_name('UPPER') -> 'upper'
        pythonize_name('ContentMd5')->'content_md5'
        pythonize_name('') -> ''
    """
    # Special-cased because the generic rules would produce 'e_tag'.
    if name == "eTag":
        return "etag"
    result = name
    for pattern in (_first_cap_regex, _number_cap_regex, _end_cap_regex):
        result = pattern.sub(r'\1_\2', result)
    return result.lower()
def get_canonical_querystring(params, for_signature):
    """
    Build the canonical querystring used when signing a request.

    :param params: mapping of query parameter names to values (may be None)
    :param for_signature: when True, exclude the Authorization parameter
        from the canonical form
    :return: '&'-joined, sorted "key=encoded_value" pairs ('' for None input)
    :rtype: str
    """
    if params is None:
        return ''
    result = []
    for k, v in params.items():
        # BUG FIX: the original compared the bound method ``k.lower`` (never
        # equal to a string, so always True) instead of calling it -- the
        # Authorization parameter was never actually filtered out.
        if not for_signature or k.lower() != http_headers.AUTHORIZATION.lower():
            if v is None:
                v = ''
            result.append('%s=%s' % (k, normalize_string(v)))
    result.sort()
    return '&'.join(result)
def print_object(obj):
    """
    Render an object's public instance attributes as a "{k:v,...}" string.

    :param obj: any object with a __dict__
    :return: string such as "{name:'x',count:3}"
    :rtype: str
    """
    tmp = []
    for k, v in obj.__dict__.items():
        if not k.startswith('__'):
            if isinstance(v, str):
                tmp.append("%s:'%s'" % (k, v))
            # NOTE(review): ``unicode`` is Python 2 only; on Python 3 this
            # branch raises NameError for non-str values.
            elif isinstance(v, unicode):
                tmp.append("%s:u'%s'" % (k, v))
            else:
                tmp.append('%s:%s' % (k, v))
    return '{%s}' % ','.join(tmp)
class Expando(object):
    """
    Expandable attribute bag: arbitrary attributes may be assigned and read.
    """
def dict_to_python_object(d):
    """
    Convert a dict into an Expando whose attribute names are the
    pythonized (snake_case) versions of the dict's keys.

    :param d: source mapping
    :return: Expando with one attribute per key
    """
    attr = {}
    for k, v in d.items():
        k = pythonize_name(str(k))
        attr[k] = v
    return Expando(attr)
def required(**types):
"""
decorator of input param check
:param types:
:return:
"""
return _required | 27.904988 | 99 | 0.548945 | # Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provide some tools for bce client.
"""
import StringIO
import cStringIO
import os
import re
import datetime
import hashlib
import base64
import string
import baidubce
from baidubce.http import http_headers
def get_md5_from_fp(fp, offset=0, length=-1, buf_size=8192):
"""
Get MD5 from file by fp.
:type fp: FileIO
:param fp: None
:type offset: long
:param offset: None
:type length: long
:param length: None
=======================
:return:
**file_size, MD(encode by base64)**
"""
origin_offset = fp.tell()
if offset:
fp.seek(offset)
md5 = hashlib.md5()
while True:
bytes_to_read = buf_size
if bytes_to_read > length > 0:
bytes_to_read = length
buf = fp.read(bytes_to_read)
if not buf:
break
md5.update(buf)
if length > 0:
length -= len(buf)
if length == 0:
break
fp.seek(origin_offset)
return base64.standard_b64encode(md5.digest())
def get_canonical_time(timestamp=0):
"""
Get cannonical time.
:type timestamp: int
:param timestamp: None
=======================
:return:
**string of canonical_time**
"""
if timestamp == 0:
utctime = datetime.datetime.utcnow()
else:
utctime = datetime.datetime.utcfromtimestamp(timestamp)
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
utctime.year, utctime.month, utctime.day,
utctime.hour, utctime.minute, utctime.second)
def is_ip(s):
"""
Check a string whether is a legal ip address.
:type s: string
:param s: None
=======================
:return:
**Boolean**
"""
try:
tmp_list = s.split(':')
s = tmp_list[0]
if s == 'localhost':
return True
tmp_list = s.split('.')
if len(tmp_list) != 4:
return False
else:
for i in tmp_list:
if int(i) < 0 or int(i) > 255:
return False
except:
return False
return True
def convert_to_standard_string(input_string):
"""
Encode a string to utf-8.
:type input_string: string
:param input_string: None
=======================
:return:
**string**
"""
if isinstance(input_string, unicode):
return input_string.encode(baidubce.DEFAULT_ENCODING)
else:
return str(input_string)
def convert_header2map(header_list):
"""
Transfer a header list to dict
:type s: list
:param s: None
=======================
:return:
**dict**
"""
header_map = {}
for a, b in header_list:
if isinstance(a, str):
a = a.strip('\"')
if isinstance(b, str):
b = b.strip('\"')
header_map[a] = b
return header_map
def safe_get_element(name, container):
"""
Get element from dict which the lower of key and name are equal.
:type name: string
:param name: None
:type container: dict
:param container: None
=======================
:return:
**Value**
"""
for k, v in container.items():
if k.strip().lower() == name.strip().lower():
return v
return ""
def check_redirect(res):
"""
Check whether the response is redirect.
:type res: HttpResponse
:param res: None
:return:
**Boolean**
"""
is_redirect = False
try:
if res.status == 301 or res.status == 302:
is_redirect = True
except:
pass
return is_redirect
def _get_normalized_char_list():
ret = ['%%%02X' % i for i in range(256)]
for ch in string.ascii_letters + string.digits + '.~-_':
ret[ord(ch)] = ch
return ret
_NORMALIZED_CHAR_LIST = _get_normalized_char_list()
def normalize_string(in_str, encoding_slash=True):
"""
Encode in_str.
When encoding_slash is True, don't encode skip_chars, vice versa.
:type in_str: string
:param in_str: None
:type encoding_slash: Bool
:param encoding_slash: None
===============================
:return:
**string**
"""
tmp = []
for ch in convert_to_standard_string(in_str):
if ch == '/' and not encoding_slash:
tmp.append('/')
else:
tmp.append(_NORMALIZED_CHAR_LIST[ord(ch)])
return ''.join(tmp)
def append_uri(base_uri, *path_components):
"""
Append path_components to the end of base_uri in order, and ignore all empty strings and None
:param base_uri: None
:type base_uri: string
:param path_components: None
:return: the final url
:rtype: str
"""
tmp = [base_uri]
for path in path_components:
if path:
tmp.append(normalize_string(path, False))
if len(tmp) > 1:
tmp[0] = tmp[0].rstrip('/')
tmp[-1] = tmp[-1].lstrip('/')
for i in range(1, len(tmp)):
tmp[i] = tmp[i].strip('/')
return '/'.join(tmp)
def check_bucket_valid(bucket):
"""
Check bucket name whether is legal.
:type bucket: string
:param bucket: None
=======================
:return:
**Boolean**
"""
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-"
if len(bucket) < 3 or len(bucket) > 63:
return False
if bucket[-1] == "-" or bucket[-1] == "_":
return False
if not (('a' <= bucket[0] <= 'z') or ('0' <= bucket[0] <= '9')):
return False
for i in bucket:
if not i in alphabet:
return False
return True
def guess_content_type_by_file_name(file_name):
"""
Get file type by filename.
:type file_name: string
:param file_name: None
=======================
:return:
**Type Value**
"""
mime_map = dict()
mime_map["js"] = "application/javascript"
mime_map["xlsx"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
mime_map["xltx"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.template"
mime_map["potx"] = "application/vnd.openxmlformats-officedocument.presentationml.template"
mime_map["ppsx"] = "application/vnd.openxmlformats-officedocument.presentationml.slideshow"
mime_map["pptx"] = "application/vnd.openxmlformats-officedocument.presentationml.presentation"
mime_map["sldx"] = "application/vnd.openxmlformats-officedocument.presentationml.slide"
mime_map["docx"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
mime_map["dotx"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.template"
mime_map["xlam"] = "application/vnd.ms-excel.addin.macroEnabled.12"
mime_map["xlsb"] = "application/vnd.ms-excel.sheet.binary.macroEnabled.12"
try:
name = os.path.basename(file_name)
suffix = name.split('.')[-1]
if suffix in mime_map.keys():
mime_type = mime_map[suffix]
else:
import mimetypes
mimetypes.init()
mime_type = mimetypes.types_map["." + suffix]
except:
mime_type = 'application/octet-stream'
if not mime_type:
mime_type = 'application/octet-stream'
return mime_type
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]{2,})')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
def pythonize_name(name):
"""Convert camel case to a "pythonic" name.
Examples::
pythonize_name('CamelCase') -> 'camel_case'
pythonize_name('already_pythonized') -> 'already_pythonized'
pythonize_name('HTTPRequest') -> 'http_request'
pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok'
pythonize_name('UPPER') -> 'upper'
pythonize_name('ContentMd5')->'content_md5'
pythonize_name('') -> ''
"""
if name == "eTag":
return "etag"
s1 = _first_cap_regex.sub(r'\1_\2', name)
s2 = _number_cap_regex.sub(r'\1_\2', s1)
return _end_cap_regex.sub(r'\1_\2', s2).lower()
def get_canonical_querystring(params, for_signature):
    """
    Build the canonical querystring used when signing a request.

    :param params: mapping of query parameter names to values (may be None)
    :param for_signature: when True, exclude the Authorization parameter
        from the canonical form
    :return: '&'-joined, sorted "key=encoded_value" pairs ('' for None input)
    :rtype: str
    """
    if params is None:
        return ''
    result = []
    for k, v in params.items():
        # BUG FIX: the original compared the bound method ``k.lower`` (never
        # equal to a string, so always True) instead of calling it -- the
        # Authorization parameter was never actually filtered out.
        if not for_signature or k.lower() != http_headers.AUTHORIZATION.lower():
            if v is None:
                v = ''
            result.append('%s=%s' % (k, normalize_string(v)))
    result.sort()
    return '&'.join(result)
def print_object(obj):
"""
:param obj:
:return:
"""
tmp = []
for k, v in obj.__dict__.items():
if not k.startswith('__'):
if isinstance(v, str):
tmp.append("%s:'%s'" % (k, v))
elif isinstance(v, unicode):
tmp.append("%s:u'%s'" % (k, v))
else:
tmp.append('%s:%s' % (k, v))
return '{%s}' % ','.join(tmp)
class Expando(object):
    """
    Expandable attribute bag.

    An optional dict seeds the initial attributes, and reading any missing
    (non-dunder) attribute yields None instead of raising.
    """
    def __init__(self, attr_dict=None):
        # Seed instance attributes from the given mapping, if any.
        if attr_dict:
            self.__dict__.update(attr_dict)
    def __getattr__(self, item):
        # Keep normal failure semantics for dunder lookups (copy, pickle,
        # etc. probe these); any other missing attribute reads as None.
        if item.startswith('__'):
            raise AttributeError
        return None
    def __repr__(self):
        return print_object(self)
def dict_to_python_object(d):
"""
:param d:
:return:
"""
attr = {}
for k, v in d.items():
k = pythonize_name(str(k))
attr[k] = v
return Expando(attr)
def required(**types):
    """
    Decorator factory validating the decorated function's arguments.

    Each keyword names a parameter and maps it to the type (or tuple of
    types) its value must be an instance of; a None value raises
    ValueError and a type mismatch raises TypeError.

    :param types: mapping of parameter name -> required type(s)
    :return: the decorating function
    """
    # NOTE(review): relies on f.func_code / f.func_name / dict.iteritems,
    # so this decorator only works on Python 2.
    def _required(f):
        def _decorated(*args, **kwds):
            # Positional arguments are matched to declared names by index.
            for i, v in enumerate(args):
                if f.func_code.co_varnames[i] in types:
                    if v is None:
                        raise ValueError('arg "%s" should not be None' %
                                         (f.func_code.co_varnames[i]))
                    if not isinstance(v, types[f.func_code.co_varnames[i]]):
                        raise TypeError('arg "%s"= %r does not match %s' %
                                        (f.func_code.co_varnames[i],
                                         v,
                                         types[f.func_code.co_varnames[i]]))
            # Keyword arguments are matched by name.
            for k, v in kwds.iteritems():
                if k in types:
                    if v is None:
                        raise ValueError('arg "%s" should not be None' % k)
                    if not isinstance(v, types[k]):
                        raise TypeError('arg "%s"= %r does not match %s' % (k, v, types[k]))
            return f(*args, **kwds)
        _decorated.func_name = f.func_name
        return _decorated
    return _required
5a1afd8c5630582cc64457b43d841f4892e391d3 | 151 | py | Python | src/alexacloud/alexa/forms.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | src/alexacloud/alexa/forms.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | src/alexacloud/alexa/forms.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | from django import forms
 | 16.777778 | 56 | 0.754967 | from django import forms
class AudioQueryForm(forms.Form):
    """Upload form for an audio query: an audio file plus an optional language code."""

    audioFile = forms.FileField()
    # Optional language code; defaults to "en-us" when omitted.
    lang = forms.CharField(required=False, initial="en-us")
c4347f59c9577acccbdac39759a618e080156846 | 7,868 | py | Python | openstates/openstates-master/scripts/affected_code/core/ny.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/scripts/affected_code/core/ny.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/scripts/affected_code/core/ny.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | import copy
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
import .base
SectionID = Token.Section.ID
NodeType = Token.Node.Type
NodeID = Token.Node.ID
NodeAndOrComma = Token.Node.AndOrComma
DiffSpec = Token.DiffSpec
AmendedAsFollows = DiffSpec.AmendedAsFollows
AmendedByAdding = DiffSpec.AmendedByAdding
Renumbered = DiffSpec.Renumbered
SessionLawChapter = Token.SessionLawChapter
SessionLawYear = Token.SessionLawYear
ActName = Token.ActName
CompilationName = Token.CompilationName
Junk = Token.Junk
subds = ['paragraph', 'division', 'chapter', 'section', 'clause',
'article', 'part']
subds += ['sub' + s for s in subds]
subds = r'(%s)' % '|'.join(sorted(subds, key=len, reverse=True))
| 33.058824 | 87 | 0.541434 | import copy
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
import .base
SectionID = Token.Section.ID
NodeType = Token.Node.Type
NodeID = Token.Node.ID
NodeAndOrComma = Token.Node.AndOrComma
DiffSpec = Token.DiffSpec
AmendedAsFollows = DiffSpec.AmendedAsFollows
AmendedByAdding = DiffSpec.AmendedByAdding
Renumbered = DiffSpec.Renumbered
SessionLawChapter = Token.SessionLawChapter
SessionLawYear = Token.SessionLawYear
ActName = Token.ActName
CompilationName = Token.CompilationName
Junk = Token.Junk
subds = ['paragraph', 'division', 'chapter', 'section', 'clause',
'article', 'part']
subds += ['sub' + s for s in subds]
subds = r'(%s)' % '|'.join(sorted(subds, key=len, reverse=True))
class Lexer(RegexLexer):
    """Pygments lexer tokenizing NY bill text that describes amended statutes.

    States: 'root' scans for section headings, amendment verbs and law
    names; 'path' consumes one statute location (subdivision types/ids and
    the law they belong to); 'junk' consumes characters until an amendment
    verb appears.
    """
    tokens = {
        'root': [
            # Match 'Section 1' and 'S 2' section headings.
            (r' +Section (1).\s+', bygroups(SectionID)),
            (r' {1,2}S {1,2}(\d+)\.', bygroups(SectionID)),
            # Match singular path elements.
            (r'(?i)(?: of (?:a )?)?(%s) ' % subds,
             bygroups(NodeType), 'path'),
            # Match plural path elements.
            (r'(?i)(%s)s ' % subds, bygroups(NodeType.Plural), 'path'),
            # Match act name.
            (r', constituting the (.{,250}? act),', bygroups(ActName)),
            # Amended as follows variants.
            (r' (is|are) amended to read as follows:', AmendedAsFollows),
            (r'amended to read as follows:', AmendedAsFollows),
            # Amended by adding variants.
            (r' (is|are) amended and \w+ new', AmendedByAdding, 'path'),
            (r' is amended by adding', AmendedByAdding, 'path'),
            (r'amended by adding', AmendedByAdding, 'path'),
            # Compilation name.
            (r'(?i)the ([A-Za-z .&]+ (:?law|rules|code of the city of New York))',
             bygroups(CompilationName)),
            (r'(added|amended|renumbered) by',
             # (r',? (:?(:?as|and) )?(added|amended|renumbered) by',
             Token.RevisionSpec, 'path'),
            # Junk.
            # (r'amending [^,]+', Junk, 'junk'),
            # (r'(added|amended|renumbered) [^,]+', Junk, 'junk'),
            # (r'%s .{,200}? as (?:added|amended) by[^,]+?, ' % subds, Token.Junk),
            # Renumbered variants.
            (r' is renumbered', Renumbered),
            (r'renumbered', Renumbered),
            (r'\band\b', Token.And)
        ],
        'path': [
            (r',? (:?(:?as|and) )?(added|amended|renumbered) by', Token.RevisionSpec),
            (r' local law number (\w+) of the city of (.+?) for the year (\w+)',
             bygroups(Token.LocalLaw.Number,
                      Token.LocalLaw.Jxn,
                      Token.LocalLaw.Year), '#pop'),
            (r' local law number (\w+)',
             bygroups(Token.LocalLaw.Number), '#pop'),
            # "of the codes and ordinances of the city of Yonkers..."
            (r' of the (.+?) of the city of (.+?)(?:,|is)',
             bygroups(Token.MunicipalLaw.Name, Token.MunicipalLaw.Jxn), '#pop'),
            (r' of the laws of (\d{4})', bygroups(SessionLawYear), '#pop'),
            (r'(?i)of the ([A-Za-z \-.&]+ (:?law|rules|code of the city of New York))',
             bygroups(CompilationName), '#pop'),
            (r'(?i)(?: of (?:a )?)?(%s) ' % subds,
             bygroups(NodeType)),
            (r'are added', Token.Added, '#pop'),
            (r'to read as follows', Junk, '#pop'),
            (r'of ', Token.Of, '#pop'),
            (r'[^ ,]+', NodeID),
            (r',? and ', NodeAndOrComma),
            (r', ', NodeAndOrComma),
        ],
        'junk': [
            # Consume one character at a time until an amendment verb is next.
            (r'(?!(is|are) (amended|renumbered|repealed)).', Junk),
            (r'(is|are) amended to read as follows:', AmendedAsFollows, '#pop'),
            (r' (is|are) amended and \w+ new', AmendedByAdding, 'path'),
            (r' is amended by adding', AmendedByAdding, '#pop'),
            (r'is renumbered', Renumbered, '#pop'),
        ]
    }
class ParserState(dict):
    """Accumulates the parse result for one amendment clause.

    The state is itself a dict: 'paths' holds lists of node dicts
    ({'type': ..., 'id': ...}) locating the affected statute text, and the
    handler methods below are invoked (by Parser) with matched token text.
    Handlers accept and ignore extra arguments so they can share a
    uniform call signature.
    """
    def __init__(self):
        self._current_path = None
        self._current_node = None
        self['paths'] = []

    def finalize(self):
        """Return a plain-dict snapshot of the accumulated state."""
        return dict(self)

    def section_set_id(self, text=None, *args, **kwargs):
        """Record the bill section id (from a 'Section 1.' heading)."""
        self['section'] = text

    def path_new(self, text=None, *args, **kwargs):
        """Start a new empty path, make it current, and return it."""
        path = []
        self['paths'].append(path)
        self._current_path = path
        return path

    @property
    def path_current(self, text=None, *args, **kwargs):
        """The path being built, creating one on first access."""
        if self._current_path is None:
            return self.path_new()
        else:
            return self._current_path

    def node_new(self, text=None, *args, **kwargs):
        """Append a fresh node dict to the current path and return it."""
        node = {}
        self.path_current.append(node)
        self._current_node = node
        return node

    @property
    def node_current(self, text=None, *args, **kwargs):
        """The node being built, creating one on first access."""
        if self._current_node is None:
            # BUG FIX: the original called self._new_node(), a method that
            # does not exist (AttributeError); the constructor is node_new().
            return self.node_new()
        else:
            return self._current_node

    def node_set_id(self, text=None, *args, **kwargs):
        """Set the current node's id, dropping any trailing period."""
        text = text.rstrip('.')
        self.node_current['id'] = text

    def node_set_type(self, text=None, *args, **kwargs):
        """Set the current node's type (lower-cased, de-pluralized).

        If the current node already has a type, a new node is started and
        given the type instead.
        """
        text = text.lower().rstrip('s')
        node_current = self.node_current
        if 'type' not in node_current:
            node_current['type'] = text
        else:
            self.node_new()['type'] = text

    def path_clone(self, text=None, *args, **kwargs):
        """Deep-copy the current path (for 'sections 2 and 3' style lists)."""
        new_path = copy.deepcopy(self.path_current)
        self['paths'].append(new_path)
        self._current_path = new_path
        self._current_node = new_path[-1]
        return new_path

    def path_set_compilation_name(self, text=None, *args, **kwargs):
        """Mark the target as a named statute compilation."""
        self['type'] = 'statute'
        self['id'] = text

    def path_set_session_law_year(self, text=None, *args, **kwargs):
        """Mark the target as a session law of the given year."""
        self['type'] = 'session_law'
        self['year'] = text

    def path_set_session_law_chapter(self, text=None, *args, **kwargs):
        """Record the session-law chapter number."""
        self['type'] = 'session_law'
        self['chapter'] = text

    def path_set_act_name(self, text=None, *args, **kwargs):
        """Record the popular name of the act, trimming punctuation."""
        self['act_name'] = text.strip(', ')

    def amended_by_adding(self, text=None, *args, **kwargs):
        """Record an 'amended by adding ...' impact with a fresh detail path."""
        paths = []
        path = []
        node = {}
        path.append(node)
        paths.append(path)
        self._current_node = node
        self._current_path = path
        self['impact'] = 'added'
        self['details'] = paths

    def amended_as_follows(self, text=None, *args, **kwargs):
        """Record an 'amended to read as follows' impact."""
        self['impact'] = 'amended'

    def renumbered(self, text=None, *args, **kwargs):
        """Record a renumbering impact with a fresh detail path."""
        paths = []
        path = []
        node = {}
        path.append(node)
        paths.append(path)
        self._current_node = node
        self._current_path = path
        self['impact'] = 'renumbered'
        self['details'] = paths
class Parser(base.Parser):
    """Maps lexer token types to ParserState handler-method names.

    Each rule entry is (token type, space-separated handler names[, parser
    mode transition]); handler names are resolved on ParserState.
    """
    ignored_token_types = [Token.Error]
    rules = {
        'root': [
            (SectionID, 'section_set_id'),
            (NodeType, 'path_new node_new node_set_type', 'path'),
            # NOTE(review): ParserState defines no 'path_new_parallel'
            # handler -- confirm how base.Parser resolves missing names.
            (NodeType.Plural, 'path_new_parallel node_new node_set_type', 'path'),
            (AmendedAsFollows, 'amended_as_follows'),
            (AmendedByAdding, 'amended_by_adding', 'path'),
            (Renumbered, 'renumbered', 'path'),
            (ActName, 'path_set_act_name'),
            (Junk, ''),
            (CompilationName, 'path_set_compilation_name'),
        ],
        'path': [
            (SessionLawYear, 'path_set_session_law_year', '#pop'),
            (CompilationName, 'path_set_compilation_name', '#pop'),
            (NodeID, 'node_set_id'),
            (NodeType, 'node_set_type'),
            (NodeAndOrComma, 'path_clone'),
            (Junk, '', '#pop')
        ],
    }
| 2,358 | 4,707 | 69 |
9acdfdb398321632d4a644bbb2ca75d2a1e5f6e2 | 5,501 | py | Python | zkyhaxpy/pd_tools.py | surasakcho/pyzkyhax | aada94dc211f294f0e91ff39ca31715bcc468b25 | [
"MIT"
] | null | null | null | zkyhaxpy/pd_tools.py | surasakcho/pyzkyhax | aada94dc211f294f0e91ff39ca31715bcc468b25 | [
"MIT"
] | null | null | null | zkyhaxpy/pd_tools.py | surasakcho/pyzkyhax | aada94dc211f294f0e91ff39ca31715bcc468b25 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from IPython.display import display, HTML, display_html
from tqdm.notebook import tqdm
def auto_adjust():
    '''
    Apply default pandas display settings:
    Set column width = 100
    Max displayed rows = 100
    Max displayed columns = 100
    '''
    set_colwidth(100)
    pd.options.display.max_rows = 100
    pd.options.display.max_columns = 100
def set_max_rows(max_rows=100):
    '''
    Set the maximum number of rows pandas displays for a dataframe.
    Return : None
    '''
    pd.set_option('display.max_rows', max_rows)
def set_max_columns(max_columns=100):
    '''
    Set the maximum number of columns pandas displays for a dataframe.
    Return : None
    '''
    pd.set_option('display.max_columns', max_columns)
def display_html(df):
    '''
    Display a dataframe as an HTML table (for use inside a notebook).
    '''
    # NOTE(review): this shadows IPython.display.display_html imported at the
    # top of the file -- confirm the shadowing is intentional.
    display(HTML(df.to_html()))
def inc_colwidth(inc_colwidth=20, target_colwidth=None):
    '''
    Increase the max column width of the pandas dataframe display, either
    by a delta (inc_colwidth) or directly to target_colwidth when given.
    Return : None
    '''
    if target_colwidth is None:
        new_width = pd.get_option("display.max_colwidth") + inc_colwidth
    else:
        new_width = target_colwidth
    pd.set_option('max_colwidth', new_width)
    print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def dec_colwidth(dec_colwidth=20, target_colwidth=None):
    '''
    Decrease the max column width of the pandas dataframe display, either
    by a delta (dec_colwidth) or directly to target_colwidth when given.
    Return : None
    '''
    if target_colwidth is None:
        new_width = pd.get_option("display.max_colwidth") - dec_colwidth
    else:
        new_width = target_colwidth
    pd.set_option('max_colwidth', new_width)
    print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def set_colwidth(target_colwidth=100):
    '''
    Set the max column width of the pandas dataframe display.
    Return : None
    '''
    pd.options.display.max_colwidth = target_colwidth
def get_curr_colwidth():
    '''
    Print the current max column width of the pandas dataframe display.
    Return : None
    '''
    print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def read_parquets(list_file_path, columns='all'):
    '''
    Read multiple parquet files of the same template into a single pandas dataframe.

    list_file_path : iterable of parquet file paths
    columns : 'all' to read every column, otherwise a list of column names
        passed through to pandas.read_parquet
    Return : one dataframe concatenated from all files (original indexes are
        kept, so index values may repeat across files)
    '''
    list_df = []
    for file_path in tqdm(list_file_path, 'reading parquets...'):
        if columns=='all':
            list_df.append(pd.read_parquet(file_path))
        else:
            list_df.append(pd.read_parquet(file_path, columns=columns))
    df = pd.concat(list_df)
    return df
def convert_dtypes(in_df, in_dict_dtypes, default_dtype=None):
    '''
    Return a copy of a dataframe with column dtypes converted according to
    the given {column name: dtype} mapping; columns not in the mapping are
    cast to default_dtype when one is provided. Columns already at the
    requested dtype are left untouched. The input dataframe is not modified.
    '''
    out_df = in_df.copy()
    for col in out_df.columns:
        if col in in_dict_dtypes:
            wanted = in_dict_dtypes[col]
        elif default_dtype:
            wanted = default_dtype
        else:
            continue
        if out_df[col].dtype != wanted:
            out_df[col] = out_df[col].astype(wanted)
    return out_df
def optimize_dtypes(df, excluded_cols=None, only_int=True, allow_unsigned=False):
'''
Optimize data type of each column to minimum size.
'''
df = df.copy()
if excluded_cols:
assert(type(excluded_cols) == list)
list_cols = [col for col in df.columns if col not in excluded_cols]
else:
list_cols = list(df.columns)
if (only_int==True) :
list_cols = [col for col in list_cols if 'int' in str(df[col].dtype)]
for col in list_cols:
col_dtype_ori_str = str(df[col].dtype)
col_max_val = df[col].max()
col_min_val = df[col].min()
if 'int' in col_dtype_ori_str:
if (col_min_val >= 0) & (allow_unsigned==True):
if col_max_val < 2**8:
col_dtype_new = np.uint8
elif col_max_val < 2**16:
col_dtype_new = np.uint16
elif col_max_val < 2**32:
col_dtype_new = np.uint32
else:
col_dtype_new = np.uint64
else:
if (col_max_val < 2**7) & (col_min_val >= -2**7):
col_dtype_new = np.int8
elif (col_max_val < 2**15) & (col_min_val >= -2**15):
col_dtype_new = np.int16
elif (col_max_val < 2**31) & (col_min_val >= -2**31):
col_dtype_new = np.int32
else:
col_dtype_new = np.int64
assert(col_min_val == col_dtype_new(col_min_val))
assert(col_max_val == col_dtype_new(col_max_val))
col_dtype_new_str = str(col_dtype_new).split("'")[1].split('.')[1]
if col_dtype_ori_str != col_dtype_new_str:
df[col] = df[col].astype(col_dtype_new)
print(f'Column "{col}": {col_dtype_ori_str} -> {col_dtype_new_str}')
else:
pass
return df | 27.643216 | 86 | 0.570805 | import pandas as pd
import numpy as np
from IPython.display import display, HTML, display_html
from tqdm.notebook import tqdm
def auto_adjust():
'''
Set column width = 100
Max displayed rows = 100
Max displayed columns = 100
'''
set_colwidth(100)
pd.options.display.max_rows = 100
pd.options.display.max_columns = 100
def set_max_rows(max_rows=100):
'''
Set max display rows
Return : None
'''
pd.options.display.max_rows = max_rows
def set_max_columns(max_columns=100):
'''
Set max display columns
Return : None
'''
pd.options.display.max_columns = max_columns
def display_html(df):
'''
display a dataframe as html table
'''
display(HTML(df.to_html()))
def inc_colwidth(inc_colwidth=20,target_colwidth=None):
'''
Increase column width of pandas dataframe display
Return : None
'''
if target_colwidth == None:
curr_max_colwidth = pd.get_option("display.max_colwidth")
new_max_colwidth = curr_max_colwidth + inc_colwidth
pd.set_option('max_colwidth', new_max_colwidth)
else:
pd.set_option('max_colwidth', target_colwidth)
print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def dec_colwidth(dec_colwidth=20,target_colwidth=None):
'''
Decrease column width of pandas dataframe display
Return : None
'''
if target_colwidth == None:
curr_max_colwidth = pd.get_option("display.max_colwidth")
new_max_colwidth = curr_max_colwidth - dec_colwidth
pd.set_option('max_colwidth', new_max_colwidth)
else:
pd.set_option('max_colwidth', target_colwidth)
print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def set_colwidth(target_colwidth=100):
'''
Decrease column width of pandas dataframe display
Return : None
'''
pd.set_option('max_colwidth', target_colwidth)
def get_curr_colwidth():
'''
Decrease column width of pandas dataframe display
Return : None
'''
print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def read_parquets(list_file_path, columns='all'):
'''
Read multiple parquet files of the same template into a single pandas dataframe.
'''
list_df = []
for file_path in tqdm(list_file_path, 'reading parquets...'):
if columns=='all':
list_df.append(pd.read_parquet(file_path))
else:
list_df.append(pd.read_parquet(file_path, columns=columns))
df = pd.concat(list_df)
return df
def convert_dtypes(in_df, in_dict_dtypes, default_dtype=None):
'''
Convert dtypes of a dataframe according to given dict of column names and dtypes.
'''
in_df = in_df.copy()
for col_nm in in_df.columns:
if col_nm in in_dict_dtypes.keys():
if in_df[col_nm].dtype != in_dict_dtypes[col_nm]:
in_df[col_nm] = in_df[col_nm].astype(in_dict_dtypes[col_nm])
elif default_dtype:
if in_df[col_nm].dtype != default_dtype:
in_df[col_nm] = in_df[col_nm].astype(default_dtype)
return in_df
def optimize_dtypes(df, excluded_cols=None, only_int=True, allow_unsigned=False):
'''
Optimize data type of each column to minimum size.
'''
df = df.copy()
if excluded_cols:
assert(type(excluded_cols) == list)
list_cols = [col for col in df.columns if col not in excluded_cols]
else:
list_cols = list(df.columns)
if (only_int==True) :
list_cols = [col for col in list_cols if 'int' in str(df[col].dtype)]
for col in list_cols:
col_dtype_ori_str = str(df[col].dtype)
col_max_val = df[col].max()
col_min_val = df[col].min()
if 'int' in col_dtype_ori_str:
if (col_min_val >= 0) & (allow_unsigned==True):
if col_max_val < 2**8:
col_dtype_new = np.uint8
elif col_max_val < 2**16:
col_dtype_new = np.uint16
elif col_max_val < 2**32:
col_dtype_new = np.uint32
else:
col_dtype_new = np.uint64
else:
if (col_max_val < 2**7) & (col_min_val >= -2**7):
col_dtype_new = np.int8
elif (col_max_val < 2**15) & (col_min_val >= -2**15):
col_dtype_new = np.int16
elif (col_max_val < 2**31) & (col_min_val >= -2**31):
col_dtype_new = np.int32
else:
col_dtype_new = np.int64
assert(col_min_val == col_dtype_new(col_min_val))
assert(col_max_val == col_dtype_new(col_max_val))
col_dtype_new_str = str(col_dtype_new).split("'")[1].split('.')[1]
if col_dtype_ori_str != col_dtype_new_str:
df[col] = df[col].astype(col_dtype_new)
print(f'Column "{col}": {col_dtype_ori_str} -> {col_dtype_new_str}')
else:
pass
return df | 0 | 0 | 0 |
134308f4150cf7730fd571f7ab628d0f7e0754d6 | 7,004 | py | Python | grappa/test.py | sgissinger/grappa | 51157a828d5cfdc731cada9b16255eaaf1cabbe6 | [
"MIT"
] | null | null | null | grappa/test.py | sgissinger/grappa | 51157a828d5cfdc731cada9b16255eaaf1cabbe6 | [
"MIT"
] | null | null | null | grappa/test.py | sgissinger/grappa | 51157a828d5cfdc731cada9b16255eaaf1cabbe6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .log import log
from .empty import empty
from .base import BaseTest
from .engine import Engine
from .context import Context
from .resolver import OperatorResolver
class Test(BaseTest):
"""
Test represents the test definition in `grappa` with extensible and
dynamic, runtime inferred DSL based on registered operators and
third-party plugins.
Arguments:
subject (mixed): subject value to test.
"""
# Tracks context manager scopes
_context = 0
# Tracks yielded value by context manager
_context_subject = empty
# Global flag, only used by global singleton instance
_global = False
@property
def should(self):
"""
Alias name to self reference the current instance.
Required for DSL API.
"""
return self
@property
def expect(self):
"""
Alias name to self reference the current instance.
Required for DSL API.
"""
return self
@property
def __call__(self, subject, overload=False):
"""
Overloads function invokation of `Test` class instance.
This is magical and widely used in `grappa` test execution by both
developers and internal engine.
Arguments:
subject (mixed): test subject to use.
overload (bool): `True` if the call if triggered via operator
overloading invokation, otherise `False`.
Returns:
grappa.Test: new test instance with the given subject.
"""
self._ctx.subject = subject
__tracebackhide__ = True
return self._trigger() if overload else Test(subject)
def __getattr__(self, name):
"""
Overloads class attribute accessor proxying calls dynamically
into assertion operators calls.
This method is invoked by Python runtime engine, not by developers.
"""
# Return a new test instance if running as global
if self._global:
# If using context manager, use context defined subject
subject = self._context_subject if self._context else empty
# Create new test and proxy attribute call
return Test(subject).__getattr__(name)
# Resolve and register operator by name
__tracebackhide__ = True
return OperatorResolver(self).resolve(name)
def _trigger(self):
"""
Trigger assertions in the current test engine.
Raises:
AssertionError: in case of assertion error.
Exception: in case of any other assertion error.
"""
log.debug('[test] trigger with context: {}'.format(self._ctx))
try:
err = self._engine.run(self._ctx)
except Exception as _err:
err = _err
finally:
# Important: reset engine state to defaults
self._engine.reset()
self._root._engine.reset()
# If error is present, raise it!
if err:
__tracebackhide__ = True
raise err
return self
def _clone(self):
"""
Clones the current `Test` instance.
Returns:
grappa.Test
"""
test = Test(self._ctx.subject)
test._ctx = self._ctx.clone()
test._engine = self._engine.clone()
return test
def _flush(self):
"""
Flushes the current test state, including test engine, assertions and
current context.
"""
self.__init__()
# Assertions composition
def all(self, *tests):
"""
Composes multiple tests and executes them, in series, once a
subject is received.
Conditional composition operator equivalent to `all` built-in
Python function.
Arguments:
*tests (grappa.Test): test instances to run.
"""
self._engine.add_assertion(run_tests)
return self
def any(self, *tests):
"""
Composes multiple tests and executes them, in series, once a
subject is received.
Conditional composition operator equivalent to `any` built-in
Python function.
Arguments:
*tests (grappa.Test): test instances to run.
"""
self._engine.add_assertion(run_tests)
return self
def __overload__(self, subject):
"""
Method triggered by magic methods executed via operator overloading.
"""
if isinstance(subject, Test):
# Clone test instance to make it side-effects free
fork = subject._clone()
fork._ctx.chained = True
fork._ctx.subject = self._ctx.subject
# Trigger assertions
__tracebackhide__ = True
return fork._trigger()
# Otherwise invoke the test function with a subject
__tracebackhide__ = True
return self.__call__(subject, overload=True)
def __or__(self, value):
"""
Overloads ``|`` as from left-to-right operator precedence expression.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __ror__(self, value):
"""
Overloads ``|`` operator.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __gt__(self, value):
"""
Overloads ``>`` operator.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __enter__(self):
"""
Initializes context manager.
"""
log.debug('creates new test context manager: {}'.format(self._ctx))
test._context += 1
test._context_subject = self._ctx.subject
def __exit__(self, etype, value, traceback):
"""
Exists context manager.
"""
log.debug('exists test context manager: {}'.format(value))
test._context -= 1
if test._context == 0:
test._context_subject = empty
# Create global singleton instance
test = Test()
# This is black magic in order to deal with chainable states
# and operator precedence.
test._global = True
| 27.904382 | 77 | 0.583095 | # -*- coding: utf-8 -*-
from .log import log
from .empty import empty
from .base import BaseTest
from .engine import Engine
from .context import Context
from .resolver import OperatorResolver
class Test(BaseTest):
"""
Test represents the test definition in `grappa` with extensible and
dynamic, runtime inferred DSL based on registered operators and
third-party plugins.
Arguments:
subject (mixed): subject value to test.
"""
# Tracks context manager scopes
_context = 0
# Tracks yielded value by context manager
_context_subject = empty
# Global flag, only used by global singleton instance
_global = False
def __init__(self, subject=empty):
self._engine = Engine()
self._ctx = Context()
self._ctx.subjects = []
self._ctx.subject = subject
self._ctx.chained = False
self._ctx.style = 'should'
@property
def should(self):
"""
Alias name to self reference the current instance.
Required for DSL API.
"""
return self
@property
def expect(self):
"""
Alias name to self reference the current instance.
Required for DSL API.
"""
return self
@property
def _root(self):
return test
def __call__(self, subject, overload=False):
"""
Overloads function invokation of `Test` class instance.
This is magical and widely used in `grappa` test execution by both
developers and internal engine.
Arguments:
subject (mixed): test subject to use.
overload (bool): `True` if the call if triggered via operator
overloading invokation, otherise `False`.
Returns:
grappa.Test: new test instance with the given subject.
"""
self._ctx.subject = subject
__tracebackhide__ = True
return self._trigger() if overload else Test(subject)
def __getattr__(self, name):
"""
Overloads class attribute accessor proxying calls dynamically
into assertion operators calls.
This method is invoked by Python runtime engine, not by developers.
"""
# Return a new test instance if running as global
if self._global:
# If using context manager, use context defined subject
subject = self._context_subject if self._context else empty
# Create new test and proxy attribute call
return Test(subject).__getattr__(name)
# Resolve and register operator by name
__tracebackhide__ = True
return OperatorResolver(self).resolve(name)
def _trigger(self):
"""
Trigger assertions in the current test engine.
Raises:
AssertionError: in case of assertion error.
Exception: in case of any other assertion error.
"""
log.debug('[test] trigger with context: {}'.format(self._ctx))
try:
err = self._engine.run(self._ctx)
except Exception as _err:
err = _err
finally:
# Important: reset engine state to defaults
self._engine.reset()
self._root._engine.reset()
# If error is present, raise it!
if err:
__tracebackhide__ = True
raise err
return self
def _clone(self):
"""
Clones the current `Test` instance.
Returns:
grappa.Test
"""
test = Test(self._ctx.subject)
test._ctx = self._ctx.clone()
test._engine = self._engine.clone()
return test
def _flush(self):
"""
Flushes the current test state, including test engine, assertions and
current context.
"""
self.__init__()
# Assertions composition
def all(self, *tests):
"""
Composes multiple tests and executes them, in series, once a
subject is received.
Conditional composition operator equivalent to `all` built-in
Python function.
Arguments:
*tests (grappa.Test): test instances to run.
"""
def run_tests(subject):
for test in tests:
try:
test(subject, overload=True)
except Exception as err:
return err
return True
self._engine.add_assertion(run_tests)
return self
def any(self, *tests):
"""
Composes multiple tests and executes them, in series, once a
subject is received.
Conditional composition operator equivalent to `any` built-in
Python function.
Arguments:
*tests (grappa.Test): test instances to run.
"""
def run_tests(subject):
err = None
for test in tests:
try:
test(subject, overload=True)
except Exception as _err:
err = _err
else:
return True
return err
self._engine.add_assertion(run_tests)
return self
def __overload__(self, subject):
"""
Method triggered by magic methods executed via operator overloading.
"""
if isinstance(subject, Test):
# Clone test instance to make it side-effects free
fork = subject._clone()
fork._ctx.chained = True
fork._ctx.subject = self._ctx.subject
# Trigger assertions
__tracebackhide__ = True
return fork._trigger()
# Otherwise invoke the test function with a subject
__tracebackhide__ = True
return self.__call__(subject, overload=True)
def __or__(self, value):
"""
Overloads ``|`` as from left-to-right operator precedence expression.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __ror__(self, value):
"""
Overloads ``|`` operator.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __gt__(self, value):
"""
Overloads ``>`` operator.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __enter__(self):
"""
Initializes context manager.
"""
log.debug('creates new test context manager: {}'.format(self._ctx))
test._context += 1
test._context_subject = self._ctx.subject
def __exit__(self, etype, value, traceback):
"""
Exists context manager.
"""
log.debug('exists test context manager: {}'.format(value))
test._context -= 1
if test._context == 0:
test._context_subject = empty
# Create global singleton instance
test = Test()
# This is black magic in order to deal with chainable states
# and operator precedence.
test._global = True
| 702 | 0 | 113 |
874f4345d3b70028edf1b074549178e459aa3ab9 | 1,431 | py | Python | Traidoo/settings/third_party/drf.py | stanwood/traidoo-api | 83e8599f2eb54352988bac27e2d4acd30734816d | [
"MIT"
] | 3 | 2020-05-05T12:12:09.000Z | 2020-05-08T08:48:16.000Z | Traidoo/settings/third_party/drf.py | stanwood/traidoo-api | 83e8599f2eb54352988bac27e2d4acd30734816d | [
"MIT"
] | 160 | 2020-05-19T13:03:43.000Z | 2022-03-12T00:35:28.000Z | Traidoo/settings/third_party/drf.py | stanwood/traidoo-api | 83e8599f2eb54352988bac27e2d4acd30734816d | [
"MIT"
] | null | null | null | import environ
env = environ.Env()
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 10,
"MAX_PAGE_SIZE": 100,
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_FILTER_BACKENDS": (
"django_filters.rest_framework.DjangoFilterBackend",
"rest_framework.filters.OrderingFilter",
"rest_framework.filters.SearchFilter",
),
"DEFAULT_RENDERER_CLASSES": [
"djangorestframework_camel_case.render.CamelCaseJSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
]
if env.bool("DEBUG")
else ["djangorestframework_camel_case.render.CamelCaseJSONRenderer",],
"DEFAULT_PARSER_CLASSES": (
"djangorestframework_camel_case.parser.CamelCaseJSONParser",
"djangorestframework_camel_case.parser.CamelCaseFormParser",
"djangorestframework_camel_case.parser.CamelCaseMultiPartParser",
),
"JSON_UNDERSCOREIZE": {"no_underscore_before_number": True},
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"COERCE_DECIMAL_TO_STRING": False,
"DATETIME_FORMAT": None,
"EXCEPTION_HANDLER": "core.errors.exception_handler.full_details_exception_handler",
}
| 39.75 | 88 | 0.745632 | import environ
env = environ.Env()
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 10,
"MAX_PAGE_SIZE": 100,
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_FILTER_BACKENDS": (
"django_filters.rest_framework.DjangoFilterBackend",
"rest_framework.filters.OrderingFilter",
"rest_framework.filters.SearchFilter",
),
"DEFAULT_RENDERER_CLASSES": [
"djangorestframework_camel_case.render.CamelCaseJSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
]
if env.bool("DEBUG")
else ["djangorestframework_camel_case.render.CamelCaseJSONRenderer",],
"DEFAULT_PARSER_CLASSES": (
"djangorestframework_camel_case.parser.CamelCaseJSONParser",
"djangorestframework_camel_case.parser.CamelCaseFormParser",
"djangorestframework_camel_case.parser.CamelCaseMultiPartParser",
),
"JSON_UNDERSCOREIZE": {"no_underscore_before_number": True},
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"COERCE_DECIMAL_TO_STRING": False,
"DATETIME_FORMAT": None,
"EXCEPTION_HANDLER": "core.errors.exception_handler.full_details_exception_handler",
}
| 0 | 0 | 0 |
9b0a2001b1080869843a4a21040e0f6bb69b8230 | 435 | py | Python | lib/max_subarray.py | ktiktok96/dynamic-programming | 19b993e105311c761ab8576e972e93acaab358d1 | [
"MIT"
] | null | null | null | lib/max_subarray.py | ktiktok96/dynamic-programming | 19b993e105311c761ab8576e972e93acaab358d1 | [
"MIT"
] | null | null | null | lib/max_subarray.py | ktiktok96/dynamic-programming | 19b993e105311c761ab8576e972e93acaab358d1 | [
"MIT"
] | null | null | null |
def max_sub_array(nums):
""" Returns the max subarray of the given list of numbers.
Returns 0 if nums is None or an empty list.
Time Complexity: O(n)
Space Complexity: O(1)
"""
max_sub_array = 0
sum = 0
for num in nums:
sum = max(0, sum + num)
max_sub_array = max(max_sub_array, sum)
if max_sub_array <= 0:
return max(nums)
return max_sub_array | 27.1875 | 62 | 0.581609 |
def max_sub_array(nums):
""" Returns the max subarray of the given list of numbers.
Returns 0 if nums is None or an empty list.
Time Complexity: O(n)
Space Complexity: O(1)
"""
max_sub_array = 0
sum = 0
for num in nums:
sum = max(0, sum + num)
max_sub_array = max(max_sub_array, sum)
if max_sub_array <= 0:
return max(nums)
return max_sub_array | 0 | 0 | 0 |
18e04cff1c19f026e21c3549f3feb6424a4f4f3d | 5,856 | py | Python | mfem/_ser/sets.py | tomstitt/PyMFEM | b00199ec0d7a5fba891f656575e91a64d3e35eb5 | [
"BSD-3-Clause"
] | 93 | 2017-03-01T16:45:33.000Z | 2022-03-27T22:10:33.000Z | mfem/_ser/sets.py | tomstitt/PyMFEM | b00199ec0d7a5fba891f656575e91a64d3e35eb5 | [
"BSD-3-Clause"
] | 64 | 2017-03-15T21:47:31.000Z | 2022-03-31T23:59:00.000Z | mfem/_ser/sets.py | tomstitt/PyMFEM | b00199ec0d7a5fba891f656575e91a64d3e35eb5 | [
"BSD-3-Clause"
] | 32 | 2017-03-02T22:13:38.000Z | 2022-03-26T13:09:31.000Z | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sets
else:
import _sets
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sets.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sets.SWIG_PyStaticMethod_New
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.table
class IntegerSet(object):
r"""Proxy of C++ mfem::IntegerSet class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(IntegerSet self) -> IntegerSet
__init__(IntegerSet self, IntegerSet s) -> IntegerSet
__init__(IntegerSet self, int const n, int const * p) -> IntegerSet
"""
_sets.IntegerSet_swiginit(self, _sets.new_IntegerSet(*args))
def Size(self):
r"""Size(IntegerSet self) -> int"""
return _sets.IntegerSet_Size(self)
Size = _swig_new_instance_method(_sets.IntegerSet_Size)
def PickElement(self):
r"""PickElement(IntegerSet self) -> int"""
return _sets.IntegerSet_PickElement(self)
PickElement = _swig_new_instance_method(_sets.IntegerSet_PickElement)
def PickRandomElement(self):
r"""PickRandomElement(IntegerSet self) -> int"""
return _sets.IntegerSet_PickRandomElement(self)
PickRandomElement = _swig_new_instance_method(_sets.IntegerSet_PickRandomElement)
def __eq__(self, s):
r"""__eq__(IntegerSet self, IntegerSet s) -> int"""
return _sets.IntegerSet___eq__(self, s)
__eq__ = _swig_new_instance_method(_sets.IntegerSet___eq__)
def Recreate(self, n, p):
r"""Recreate(IntegerSet self, int const n, int const * p)"""
return _sets.IntegerSet_Recreate(self, n, p)
Recreate = _swig_new_instance_method(_sets.IntegerSet_Recreate)
__swig_destroy__ = _sets.delete_IntegerSet
# Register IntegerSet in _sets:
_sets.IntegerSet_swigregister(IntegerSet)
class ListOfIntegerSets(object):
r"""Proxy of C++ mfem::ListOfIntegerSets class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def Size(self):
r"""Size(ListOfIntegerSets self) -> int"""
return _sets.ListOfIntegerSets_Size(self)
Size = _swig_new_instance_method(_sets.ListOfIntegerSets_Size)
def PickElementInSet(self, i):
r"""PickElementInSet(ListOfIntegerSets self, int i) -> int"""
return _sets.ListOfIntegerSets_PickElementInSet(self, i)
PickElementInSet = _swig_new_instance_method(_sets.ListOfIntegerSets_PickElementInSet)
def PickRandomElementInSet(self, i):
r"""PickRandomElementInSet(ListOfIntegerSets self, int i) -> int"""
return _sets.ListOfIntegerSets_PickRandomElementInSet(self, i)
PickRandomElementInSet = _swig_new_instance_method(_sets.ListOfIntegerSets_PickRandomElementInSet)
def Insert(self, s):
r"""Insert(ListOfIntegerSets self, IntegerSet s) -> int"""
return _sets.ListOfIntegerSets_Insert(self, s)
Insert = _swig_new_instance_method(_sets.ListOfIntegerSets_Insert)
def Lookup(self, s):
r"""Lookup(ListOfIntegerSets self, IntegerSet s) -> int"""
return _sets.ListOfIntegerSets_Lookup(self, s)
Lookup = _swig_new_instance_method(_sets.ListOfIntegerSets_Lookup)
def AsTable(self, t):
r"""AsTable(ListOfIntegerSets self, Table t)"""
return _sets.ListOfIntegerSets_AsTable(self, t)
AsTable = _swig_new_instance_method(_sets.ListOfIntegerSets_AsTable)
__swig_destroy__ = _sets.delete_ListOfIntegerSets
def __init__(self):
r"""__init__(ListOfIntegerSets self) -> ListOfIntegerSets"""
_sets.ListOfIntegerSets_swiginit(self, _sets.new_ListOfIntegerSets())
# Register ListOfIntegerSets in _sets:
_sets.ListOfIntegerSets_swigregister(ListOfIntegerSets)
| 36.830189 | 118 | 0.713115 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sets
else:
import _sets
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sets.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sets.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.table
class IntegerSet(object):
r"""Proxy of C++ mfem::IntegerSet class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(IntegerSet self) -> IntegerSet
__init__(IntegerSet self, IntegerSet s) -> IntegerSet
__init__(IntegerSet self, int const n, int const * p) -> IntegerSet
"""
_sets.IntegerSet_swiginit(self, _sets.new_IntegerSet(*args))
def Size(self):
r"""Size(IntegerSet self) -> int"""
return _sets.IntegerSet_Size(self)
Size = _swig_new_instance_method(_sets.IntegerSet_Size)
def PickElement(self):
r"""PickElement(IntegerSet self) -> int"""
return _sets.IntegerSet_PickElement(self)
PickElement = _swig_new_instance_method(_sets.IntegerSet_PickElement)
def PickRandomElement(self):
r"""PickRandomElement(IntegerSet self) -> int"""
return _sets.IntegerSet_PickRandomElement(self)
PickRandomElement = _swig_new_instance_method(_sets.IntegerSet_PickRandomElement)
def __eq__(self, s):
r"""__eq__(IntegerSet self, IntegerSet s) -> int"""
return _sets.IntegerSet___eq__(self, s)
__eq__ = _swig_new_instance_method(_sets.IntegerSet___eq__)
def Recreate(self, n, p):
r"""Recreate(IntegerSet self, int const n, int const * p)"""
return _sets.IntegerSet_Recreate(self, n, p)
Recreate = _swig_new_instance_method(_sets.IntegerSet_Recreate)
__swig_destroy__ = _sets.delete_IntegerSet
# Register IntegerSet in _sets:
_sets.IntegerSet_swigregister(IntegerSet)
class ListOfIntegerSets(object):
r"""Proxy of C++ mfem::ListOfIntegerSets class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def Size(self):
r"""Size(ListOfIntegerSets self) -> int"""
return _sets.ListOfIntegerSets_Size(self)
Size = _swig_new_instance_method(_sets.ListOfIntegerSets_Size)
def PickElementInSet(self, i):
r"""PickElementInSet(ListOfIntegerSets self, int i) -> int"""
return _sets.ListOfIntegerSets_PickElementInSet(self, i)
PickElementInSet = _swig_new_instance_method(_sets.ListOfIntegerSets_PickElementInSet)
def PickRandomElementInSet(self, i):
r"""PickRandomElementInSet(ListOfIntegerSets self, int i) -> int"""
return _sets.ListOfIntegerSets_PickRandomElementInSet(self, i)
PickRandomElementInSet = _swig_new_instance_method(_sets.ListOfIntegerSets_PickRandomElementInSet)
def Insert(self, s):
r"""Insert(ListOfIntegerSets self, IntegerSet s) -> int"""
return _sets.ListOfIntegerSets_Insert(self, s)
Insert = _swig_new_instance_method(_sets.ListOfIntegerSets_Insert)
def Lookup(self, s):
r"""Lookup(ListOfIntegerSets self, IntegerSet s) -> int"""
return _sets.ListOfIntegerSets_Lookup(self, s)
Lookup = _swig_new_instance_method(_sets.ListOfIntegerSets_Lookup)
def AsTable(self, t):
r"""AsTable(ListOfIntegerSets self, Table t)"""
return _sets.ListOfIntegerSets_AsTable(self, t)
AsTable = _swig_new_instance_method(_sets.ListOfIntegerSets_AsTable)
__swig_destroy__ = _sets.delete_ListOfIntegerSets
def __init__(self):
r"""__init__(ListOfIntegerSets self) -> ListOfIntegerSets"""
_sets.ListOfIntegerSets_swiginit(self, _sets.new_ListOfIntegerSets())
# Register ListOfIntegerSets in _sets:
_sets.ListOfIntegerSets_swigregister(ListOfIntegerSets)
| 1,035 | 0 | 95 |
b9a277e36f9eebd5643b7b0866eb13ea6b643a80 | 11,587 | py | Python | tools/evalution.py | ashok-arjun/fastgan | fdd79a61bcb13ae3a5d4e7fd9a7d02009c984e80 | [
"MIT"
] | 12 | 2020-11-15T10:38:34.000Z | 2021-12-23T14:55:39.000Z | tools/evalution.py | ashok-arjun/fastgan | fdd79a61bcb13ae3a5d4e7fd9a7d02009c984e80 | [
"MIT"
] | null | null | null | tools/evalution.py | ashok-arjun/fastgan | fdd79a61bcb13ae3a5d4e7fd9a7d02009c984e80 | [
"MIT"
] | 4 | 2021-02-25T19:49:56.000Z | 2022-01-24T14:04:47.000Z | import torch
import torch.nn.functional as F
from torchvision.models import inception_v3
import numpy as np
import random
from tools.others import sample_ZCs
from scipy.stats import entropy
from scipy import linalg
def get_activations_stat_orig(datasets, n_samples,ipt_net,ipt_dims, n_gpu,dali):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
curr_n_samples = 0
ipt_net.eval()
pred_arr = np.empty((n_samples, ipt_dims))
#for i, data in enumerate(datasets, 0):
i = 0
loader_iter = iter(datasets)
while True:
try:
data = next(loader_iter)
except StopIteration:
loader_iter = iter(datasets)
data = next(datasets)
if dali:
imgs = data[0]["data"]
y_real_c = data[0]["label"].squeeze().long()
else:
(imgs,y_real_c) = data
# print('orig',i)
start = i * imgs.size(0)
end = start + imgs.size(0)
if imgs.size(2) != 299 or imgs.size(3) != 299:
imgs = F.interpolate(input=imgs,size=(299, 299), mode='bilinear',align_corners=False)
if n_gpu>0:
imgs = imgs.cuda()
pred = ipt_net(imgs)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = F.adaptive_avg_pool2d(pred, output_size=(1, 1))
# print(start,end,batch_size)
pred_arr[start:end] = pred.cpu().data.numpy().reshape(imgs.size(0), -1)
curr_n_samples += imgs.size(0)
if curr_n_samples>= n_samples:
break
i = i + 1
mu = np.mean(pred_arr, axis=0)
sigma = np.cov(pred_arr, rowvar=False)
return mu,sigma
def get_activations_stat_gen(netG,z_dim,n_classes,Z_dist,Z_params,ipt_net,total_itrs,batch_size, ipt_dims,n_gpu):
    """Compute Inception activation statistics for generator samples.

    Draws ``total_itrs`` batches of latent codes via ``sample_ZCs``, pushes
    the generated images through ``ipt_net`` and returns the mean vector and
    covariance matrix of the resulting ``ipt_dims``-dimensional activations
    (the (mu, sigma) pair consumed by the FID computation).
    """
    ipt_net.eval()
    activations = np.empty((total_itrs * batch_size, ipt_dims))
    for batch_idx in range(total_itrs):
        Z, C_int, C_vec = sample_ZCs(batch_size, z_dim, n_classes, Z_dist, Z_params, n_gpu)
        fake_imgs = netG(Z, C_vec)
        # Inception-v3 expects 299x299 inputs.
        if fake_imgs.size(2) != 299 or fake_imgs.size(3) != 299:
            fake_imgs = F.interpolate(input=fake_imgs, size=(299, 299), mode='bilinear', align_corners=False)
        features = ipt_net(fake_imgs)[0]
        # Collapse any remaining spatial extent (dims != 2048 case).
        if features.shape[2] != 1 or features.shape[3] != 1:
            features = F.adaptive_avg_pool2d(features, output_size=(1, 1))
        row = batch_idx * batch_size
        activations[row:row + batch_size] = features.cpu().data.numpy().reshape(batch_size, -1)
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    For two multivariate Gaussians X_1 ~ N(mu_1, C_1), X_2 ~ N(mu_2, C_2):
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1, sigma1 : mean / covariance of the first activation set.
    -- mu2, sigma2 : mean / covariance of the reference activation set.
    -- eps         : diagonal jitter used when sqrtm yields a singular product.
    Returns:
    -- The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'
    diff = mu1 - mu2
    # Product might be almost singular.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # Numerical error might give a slight imaginary component.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
def frechet_inception_distance(netG,ipt_net,z_dim,n_classes,Z_dist,Z_params,n_samples,batch_size, m2,s2,ipt_dims, n_gpu):
    """Compute the FID between generator samples and precomputed real-data
    statistics ``(m2, s2)``."""
    iterations = int(n_samples / batch_size)
    m1, s1 = get_activations_stat_gen(
        netG, z_dim, n_classes, Z_dist, Z_params, ipt_net,
        iterations, batch_size, ipt_dims, n_gpu)
    return calculate_frechet_distance(m1, s1, m2, s2)
| 41.981884 | 121 | 0.626651 | import torch
import torch.nn.functional as F
from torchvision.models import inception_v3
import numpy as np
import random
from tools.others import sample_ZCs
from scipy.stats import entropy
from scipy import linalg
def generate_imgs(netG,total, batch_size,img_width,z_dim,n_classes,Z_dist,Z_params,n_gpu):
    """Sample ``total`` images from the generator and return them as a uint8
    array of shape (total, 3, img_width, img_width) with values in [0, 255]."""
    batches = []
    for _ in range(int(total / batch_size)):
        noise, conditions_int, conditions = sample_ZCs(batch_size, z_dim, n_classes, Z_dist, Z_params, n_gpu)
        fake = netG(noise, conditions).detach().cpu().numpy()
        # Affine map [-1, 1] -> [0, 255], clipped, then cast to bytes.
        batches.append(np.asarray(np.clip(fake * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8))
    return np.asarray(batches).reshape((total, 3, img_width, img_width))
def sample_imgs(train_loader,total,total_iters,img_width,dali=True):
    """Collect ``total`` real images from ``train_loader`` into a uint8 array
    of shape (total, 3, img_width, img_width); stops after ``total_iters``
    batches. Supports DALI-style dict batches and (images, labels) tuples."""
    collected = []
    for batch_idx, batch in enumerate(train_loader, 0):
        if dali:
            x_real = batch[0]["data"]
            y_real_c = batch[0]["label"].view(-1).long()
        else:
            (x_real, y_real_c) = batch
        pixels = x_real.cpu().numpy()
        # Affine map [-1, 1] -> [0, 255], clipped, then cast to bytes.
        collected.append(np.asarray(np.clip(pixels * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8))
        if batch_idx >= total_iters - 1:
            break
    return np.asarray(collected).reshape((total, 3, img_width, img_width))
def inception_score(netG,ipt_net, total, batch_size,z_dim,n_classes,img_width,n_splits,Z_dist,Z_params,n_gpu):
    """Estimate the Inception Score of generator samples.

    Draws ``total`` images from ``netG`` (in ``batch_size`` chunks), scores
    them with ``ipt_net``, and computes exp(mean KL(p(y|x) || p(y))) over
    ``n_splits`` equal slices. Returns (mean, std) over the splits as tensors.
    """
    scores = []
    for i in range(int(total/batch_size)):
        noise,conditions_int,conditions=sample_ZCs(batch_size,z_dim,n_classes,Z_dist,Z_params,n_gpu)
        imgs = netG(noise,conditions)
        # Inception-v3 expects 299x299 inputs.
        if img_width != 299:
            imgs = F.interpolate(input=imgs,size=(299, 299), mode='bilinear',align_corners=False)
        #print(ipt_net(imgs))
        s = ipt_net(imgs)
        scores.append(s)
    # Raw logits for all samples -> class probabilities p(y|x).
    scores=F.softmax(torch.cat(scores, 0), 1)
    split_scores=[]
    for i in range(n_splits):
        # Slice i of n_splits equal chunks of the probability matrix.
        p_yx = scores[(i*scores.size(0)//n_splits):((i + 1)*scores.size(0)//n_splits)]
        # Marginal p(y), broadcast back to each row for the KL term.
        p_y = p_yx.mean(0,keepdim=True).expand(p_yx.size(0), -1)
        #KL_d = p_yx * (torch.log(p_yx) - torch.log(p_y))
        KL_d = p_yx * (torch.log(p_yx/p_y))
        # IS for this split: exp of the mean per-sample KL divergence.
        score_mean= KL_d.sum(1).mean().exp()
        split_scores.append(score_mean)
    split_scores=torch.tensor(split_scores)
    f_mean,f_std=split_scores.mean(),split_scores.std()
    return f_mean,f_std
def inception_score_test(ipt_net,train_loader,img_width,n_splits,total_iters,n_gpu=0,dali=True):
    """Estimate the Inception Score of real images from ``train_loader``.

    Scores up to ``total_iters + 2`` batches with ``ipt_net`` and computes
    exp(mean KL(p(y|x) || p(y))) over ``n_splits`` equal slices.
    Returns (mean, std) over the splits as tensors.
    """
    scores = []
    for i, data in enumerate(train_loader, 0):
        if dali:
            x_real = data[0]["data"]
            y_real_c = data[0]["label"].view(-1).long()
        else:
            (x_real,y_real_c) = data
        if n_gpu>0:
            x_real= x_real.cuda()
        # Inception-v3 expects 299x299 inputs.
        if img_width != 299:
            imgs = F.interpolate(input=x_real,size=(299, 299), mode='bilinear',align_corners=False)
        else:
            # BUG FIX: 'imgs' was previously left unassigned when the images
            # were already 299x299, raising NameError on the line below.
            imgs = x_real
        s = ipt_net(imgs)
        scores.append(s)
        if i>total_iters:
            break
    # Raw logits for all samples -> class probabilities p(y|x).
    scores=F.softmax(torch.cat(scores, 0), 1)
    split_scores=[]
    for i in range(n_splits):
        # Slice i of n_splits equal chunks of the probability matrix.
        p_yx = scores[(i*scores.size(0)//n_splits):((i + 1)*scores.size(0)//n_splits)]
        # Marginal p(y), broadcast back to each row for the KL term.
        p_y = p_yx.mean(0,keepdim=True).expand(p_yx.size(0), -1)
        #KL_d = p_yx * (torch.log(p_yx) - torch.log(p_y))
        KL_d = p_yx * (torch.log(p_yx/p_y))
        #KL_d = F.kl_div(p_yx.log(),p_y,reduce=False,reduction='none')
        #print(KL_d.size())
        # IS for this split: exp of the mean per-sample KL divergence.
        score_mean= KL_d.sum(1).mean().exp()
        split_scores.append(score_mean)
    split_scores=torch.tensor(split_scores)
    f_mean,f_std=split_scores.mean(),split_scores.std()
    return f_mean,f_std
def cal_valaccu(model,data,n_gpu=0):
    """Return classification accuracy of ``model`` over a labelled loader.

    ``model(x)`` is expected to return a ``(_, class_logits)`` pair; accuracy
    is the fraction of argmax predictions matching the integer labels.
    """
    n_correct = 0
    n_seen = 0
    for _, (x, y_true) in enumerate(data, 0):
        if n_gpu > 0:
            x, y_true = x.cuda(), y_true.cuda()
        with torch.no_grad():
            _, logits = model(x)
        predictions = torch.argmax(logits.data, dim=1)
        n_correct += torch.sum(predictions.eq(y_true))
        n_seen += y_true.numel()
    return n_correct.item() / n_seen
########### FID calculation ############
def get_activations_stat_orig(datasets, n_samples,ipt_net,ipt_dims, n_gpu,dali):
    """Calculates the activations of the pool_3 layer for all images.
    Params:
    -- datasets  : Iterable data loader yielding batches; DALI-style dicts
                   when ``dali`` is True, otherwise ``(images, labels)`` tuples.
    -- n_samples : Total number of images to accumulate activations for.
    -- ipt_net   : Instance of inception model.
    -- ipt_dims  : Dimensionality of features returned by Inception.
    -- n_gpu     : If > 0, move image batches to GPU before the forward pass.
    -- dali      : Whether batches come from an NVIDIA DALI pipeline.
    Returns:
    -- (mu, sigma): mean vector and covariance matrix of the activations,
       as consumed by the FID computation.
    """
    curr_n_samples = 0
    ipt_net.eval()
    pred_arr = np.empty((n_samples, ipt_dims))
    i = 0
    loader_iter = iter(datasets)
    while True:
        try:
            data = next(loader_iter)
        except StopIteration:
            # Restart the loader when it is exhausted so batches keep coming
            # until n_samples activations have been collected.
            # BUG FIX: the original called next(datasets) here, which raises
            # TypeError for plain iterables (lists, DataLoaders); draw from
            # the freshly created iterator instead.
            loader_iter = iter(datasets)
            data = next(loader_iter)
        if dali:
            imgs = data[0]["data"]
            y_real_c = data[0]["label"].squeeze().long()
        else:
            (imgs, y_real_c) = data
        start = i * imgs.size(0)
        end = start + imgs.size(0)
        # Inception-v3 expects 299x299 inputs; resize when needed.
        if imgs.size(2) != 299 or imgs.size(3) != 299:
            imgs = F.interpolate(input=imgs, size=(299, 299), mode='bilinear', align_corners=False)
        if n_gpu > 0:
            imgs = imgs.cuda()
        pred = ipt_net(imgs)[0]
        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.shape[2] != 1 or pred.shape[3] != 1:
            pred = F.adaptive_avg_pool2d(pred, output_size=(1, 1))
        pred_arr[start:end] = pred.cpu().data.numpy().reshape(imgs.size(0), -1)
        curr_n_samples += imgs.size(0)
        if curr_n_samples >= n_samples:
            break
        i = i + 1
    mu = np.mean(pred_arr, axis=0)
    sigma = np.cov(pred_arr, rowvar=False)
    return mu, sigma
def get_activations_stat_gen(netG,z_dim,n_classes,Z_dist,Z_params,ipt_net,total_itrs,batch_size, ipt_dims,n_gpu):
    """Compute Inception activation statistics for generator samples.

    Draws ``total_itrs`` batches of latent codes via ``sample_ZCs``, pushes
    the generated images through ``ipt_net`` and returns the mean vector and
    covariance matrix of the resulting ``ipt_dims``-dimensional activations
    (the (mu, sigma) pair consumed by the FID computation).
    """
    ipt_net.eval()
    activations = np.empty((total_itrs * batch_size, ipt_dims))
    for batch_idx in range(total_itrs):
        Z, C_int, C_vec = sample_ZCs(batch_size, z_dim, n_classes, Z_dist, Z_params, n_gpu)
        fake_imgs = netG(Z, C_vec)
        # Inception-v3 expects 299x299 inputs.
        if fake_imgs.size(2) != 299 or fake_imgs.size(3) != 299:
            #imgs = imgs.data.mul_(0.5).add_(0.5).mul_(255).clamp_(0,255).round_().div_(255).mul_(2).sub_(1)
            fake_imgs = F.interpolate(input=fake_imgs, size=(299, 299), mode='bilinear', align_corners=False)
        features = ipt_net(fake_imgs)[0]
        # Collapse any remaining spatial extent (dims != 2048 case).
        if features.shape[2] != 1 or features.shape[3] != 1:
            features = F.adaptive_avg_pool2d(features, output_size=(1, 1))
        row = batch_idx * batch_size
        activations[row:row + batch_size] = features.cpu().data.numpy().reshape(batch_size, -1)
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    For two multivariate Gaussians X_1 ~ N(mu_1, C_1), X_2 ~ N(mu_2, C_2):
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1, sigma1 : mean / covariance of the first activation set.
    -- mu2, sigma2 : mean / covariance of the reference activation set.
    -- eps         : diagonal jitter used when sqrtm yields a singular product.
    Returns:
    -- The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'
    diff = mu1 - mu2
    # Product might be almost singular.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # Numerical error might give a slight imaginary component.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
def frechet_inception_distance(netG,ipt_net,z_dim,n_classes,Z_dist,Z_params,n_samples,batch_size, m2,s2,ipt_dims, n_gpu):
    """Compute the FID between generator samples and precomputed real-data
    statistics ``(m2, s2)``."""
    iterations = int(n_samples / batch_size)
    m1, s1 = get_activations_stat_gen(
        netG, z_dim, n_classes, Z_dist, Z_params, ipt_net,
        iterations, batch_size, ipt_dims, n_gpu)
    return calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
| 4,117 | 0 | 115 |
4274671ad9dba39daea0717c247cf11d5f4db6a5 | 1,512 | py | Python | setup.py | sirspock/grlc | 8cbabf9607b1c5002fb8414f9e3023a60ebeada4 | [
"MIT"
] | null | null | null | setup.py | sirspock/grlc | 8cbabf9607b1c5002fb8414f9e3023a60ebeada4 | [
"MIT"
] | null | null | null | setup.py | sirspock/grlc | 8cbabf9607b1c5002fb8414f9e3023a60ebeada4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import codecs
import os
from setuptools import setup
# Collect one glob pattern ("<subdir>/*") per directory under the source
# tree so every data file ships via package_data below.
grlc_base = 'src'
grlc_base_dir = os.path.join(grlc_base, '')
grlc_data = []
for root,dirs,files in os.walk(grlc_base):
    if root != grlc_base:
        # Strip the leading "src/" prefix and match all files in the dir.
        root_dir = root.replace(grlc_base_dir, '')
        data_files = os.path.join(root_dir, '*')
        grlc_data.append(data_files)
grlc_version = '1.3.0'
# Runtime/test dependency pins and the long description are maintained in
# sibling files and read at build time.
with codecs.open('requirements.txt', mode='r') as f:
    install_requires = f.read().splitlines()
with codecs.open('requirements-test.txt', mode='r') as f:
    tests_require = f.read().splitlines()
with codecs.open('README.md', mode='r', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name="grlc",
    description='grlc, the git repository linked data API constructor',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license="Copyright 2017 Albert Meroño",
    author='Albert Meroño',
    author_email='albert.merono@vu.nl',
    url='https://github.com/CLARIAH/grlc',
    version=grlc_version,
    py_modules=['grlc'],
    packages=['grlc'],
    package_dir = {'grlc': grlc_base},
    scripts=['bin/grlc-server'],
    install_requires=install_requires,
    setup_requires=[
        # dependency for `python setup.py test`
        'pytest-runner',
        # dependencies for `python setup.py build_sphinx`
        'sphinx',
        'recommonmark'
    ],
    tests_require=tests_require,
    package_data = {'grlc': grlc_data},
)
| 29.076923 | 71 | 0.667328 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import codecs
import os
from setuptools import setup
# Collect one glob pattern ("<subdir>/*") per directory under the source
# tree so every data file ships via package_data below.
grlc_base = 'src'
grlc_base_dir = os.path.join(grlc_base, '')
grlc_data = []
for root,dirs,files in os.walk(grlc_base):
    if root != grlc_base:
        # Strip the leading "src/" prefix and match all files in the dir.
        root_dir = root.replace(grlc_base_dir, '')
        data_files = os.path.join(root_dir, '*')
        grlc_data.append(data_files)
grlc_version = '1.3.0'
# Runtime/test dependency pins and the long description are maintained in
# sibling files and read at build time.
with codecs.open('requirements.txt', mode='r') as f:
    install_requires = f.read().splitlines()
with codecs.open('requirements-test.txt', mode='r') as f:
    tests_require = f.read().splitlines()
with codecs.open('README.md', mode='r', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name="grlc",
    description='grlc, the git repository linked data API constructor',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license="Copyright 2017 Albert Meroño",
    author='Albert Meroño',
    author_email='albert.merono@vu.nl',
    url='https://github.com/CLARIAH/grlc',
    version=grlc_version,
    py_modules=['grlc'],
    packages=['grlc'],
    package_dir = {'grlc': grlc_base},
    scripts=['bin/grlc-server'],
    install_requires=install_requires,
    setup_requires=[
        # dependency for `python setup.py test`
        'pytest-runner',
        # dependencies for `python setup.py build_sphinx`
        'sphinx',
        'recommonmark'
    ],
    tests_require=tests_require,
    package_data = {'grlc': grlc_data},
)
| 0 | 0 | 0 |
ba46bd14caf6f66ee1c37506304836d18d3fa5ae | 1,933 | py | Python | src/experiments.py | xstupi00/University-Course-Timetabling-Problem | fb932808b091eacef34abce3d40ea6d8071ffd77 | [
"MIT"
] | null | null | null | src/experiments.py | xstupi00/University-Course-Timetabling-Problem | fb932808b091eacef34abce3d40ea6d8071ffd77 | [
"MIT"
] | null | null | null | src/experiments.py | xstupi00/University-Course-Timetabling-Problem | fb932808b091eacef34abce3d40ea6d8071ffd77 | [
"MIT"
] | null | null | null | import json
import os
from ucttp import _main
INPUT_DIR = "./../inputs"
PARAMETERS = "/parameters.json"
POPULATION_DEPENDENCE = "./../outputs/population_dependence.txt"
GENERATION_DEPENDENCE = "./../outputs/generation_dependence.txt"
if __name__ == '__main__':
population_size_dependence()
generations_number_dependence()
| 38.66 | 98 | 0.675116 | import json
import os
from ucttp import _main
INPUT_DIR = "./../inputs"
PARAMETERS = "/parameters.json"
POPULATION_DEPENDENCE = "./../outputs/population_dependence.txt"
GENERATION_DEPENDENCE = "./../outputs/generation_dependence.txt"
def set_parameters(population_size, generations_number):
    """Persist GA parameters into parameters.json next to this file.

    Reads the existing JSON document, overrides ``population_size`` and
    ``generations_number`` and writes it back, preserving all other keys.
    """
    parameters_file = os.path.abspath(os.path.dirname(__file__) + f'{PARAMETERS}')
    with open(parameters_file, 'r') as f:
        data = json.load(f)
    data['population_size'] = population_size
    data['generations_number'] = generations_number
    # Opening in 'w' mode truncates the file; the previous explicit
    # os.remove() was redundant and would lose the file entirely if the
    # process died between the remove and the rewrite.
    with open(parameters_file, 'w') as f:
        json.dump(data, f, indent=4)
def population_size_dependence():
    """For every benchmark instance, run the solver with population sizes
    10..100 (step 10) and append each result to the population log."""
    here = os.path.dirname(__file__)
    result_file = os.path.abspath(here + f'{POPULATION_DEPENDENCE}')
    _, _, instances = next(os.walk(os.path.abspath(here + f'{INPUT_DIR}')))
    for instance in instances:
        for population_size in range(10, 101, 10):
            set_parameters(population_size, 100)
            print(f"{instance} - {population_size}")
            result = _main(os.path.abspath(here + f'{INPUT_DIR}/{instance}'))
            with open(result_file, 'a') as log:
                log.write(f"{instance} - {population_size} - {result}\n")
def generations_number_dependence():
    """Run the solver with a fixed setup (population 50, 20000 generations)
    and append the result to the generation log."""
    result_file = os.path.abspath(os.path.dirname(__file__) + f'{GENERATION_DEPENDENCE}')
    _, _, instances = next(os.walk(os.path.abspath(os.path.dirname(__file__) + f'{INPUT_DIR}')))
    # NOTE(review): the walk result above is immediately discarded by this
    # hard-coded override -- looks like a leftover from narrowing the
    # experiment to one instance; confirm whether all instances should run.
    instances = ["large.tim"]
    for instance in instances:
        set_parameters(50, 20000)
        print(f"{instance}")
        result = _main(os.path.abspath(os.path.dirname(__file__) + f'{INPUT_DIR}/{instance}'))
        with open(result_file, 'a') as file:
            file.write(f"{instance} - {result}\n")
# Run both parameter-dependence experiments when executed as a script.
if __name__ == '__main__':
    population_size_dependence()
    generations_number_dependence()
| 1,527 | 0 | 69 |
c8eab47c0587b243d775ee16f82a3c902b57bf52 | 945 | py | Python | src/backup/table_partitions_backup_scheduler.py | Morgenz/bbq | f0fd3f626841c610aee80ad08a61123b7cccb775 | [
"Apache-2.0"
] | null | null | null | src/backup/table_partitions_backup_scheduler.py | Morgenz/bbq | f0fd3f626841c610aee80ad08a61123b7cccb775 | [
"Apache-2.0"
] | null | null | null | src/backup/table_partitions_backup_scheduler.py | Morgenz/bbq | f0fd3f626841c610aee80ad08a61123b7cccb775 | [
"Apache-2.0"
] | 1 | 2021-02-01T12:43:05.000Z | 2021-02-01T12:43:05.000Z | import logging
from src.backup.task_creator import TaskCreator
| 37.8 | 73 | 0.643386 | import logging
from src.backup.task_creator import TaskCreator
class TablePartitionsBackupScheduler(object):
    """Schedules partition-level backup tasks for one BigQuery table."""
    def __init__(self, table_reference, big_query):
        self.table_reference = table_reference
        self.big_query = big_query
    def start(self):
        """List the table's partitions and schedule a backup task batch."""
        ref = self.table_reference
        partitions = self.big_query.list_table_partitions(
            ref.get_project_id(), ref.get_dataset_id(), ref.get_table_id()
        )
        if not partitions:
            # Informational only; an empty id list is still handed over below.
            logging.info("Table %s doesn't contain any partitions", ref)
        TaskCreator.schedule_tasks_for_partition_backup(
            ref.get_project_id(),
            ref.get_dataset_id(),
            ref.get_table_id(),
            [entry['partitionId'] for entry in partitions],
        )
| 780 | 24 | 76 |
34d0218ced01dbf4e6844de59c4a5c856f56d902 | 2,578 | py | Python | formsDemo/app01/views.py | luyl1017713252/python | 3b30cffa85b625e512415fa882b4bc7708a5e0b8 | [
"MulanPSL-1.0"
] | null | null | null | formsDemo/app01/views.py | luyl1017713252/python | 3b30cffa85b625e512415fa882b4bc7708a5e0b8 | [
"MulanPSL-1.0"
] | null | null | null | formsDemo/app01/views.py | luyl1017713252/python | 3b30cffa85b625e512415fa882b4bc7708a5e0b8 | [
"MulanPSL-1.0"
] | null | null | null | from django.core.exceptions import ValidationError
from django.shortcuts import render, HttpResponse, redirect
# Create your views here.
from app01.models import UserInfo
'''
forms组件
1、校验数据
2、页面显示提示信息
'''
from django import forms
# class BookFrom(forms.Form):
# title = forms.CharField(max_length=32)
# price = forms.IntegerField
# email = forms.EmailField()
from django.forms import widgets
| 28.966292 | 146 | 0.59232 | from django.core.exceptions import ValidationError
from django.shortcuts import render, HttpResponse, redirect
# Create your views here.
from app01.models import UserInfo
'''
forms组件
1、校验数据
2、页面显示提示信息
'''
from django import forms
# class BookFrom(forms.Form):
# title = forms.CharField(max_length=32)
# price = forms.IntegerField
# email = forms.EmailField()
from django.forms import widgets
class UserForm(forms.Form):
    """Registration form: validates username uniqueness, password rules,
    and that the two password entries match."""
    # Shared error messages for the fields below (required / invalid email).
    msg = {'required': '该字段不能为空', 'invalid': '邮箱格式不正确'}
    user = forms.CharField(label='用户名',max_length=32, min_length=4, error_messages=msg, widget=widgets.TextInput(attrs={'class': 'form-control'}))
    pwd = forms.CharField(label='密码', error_messages=msg,
                          widget=widgets.PasswordInput(attrs={'class': 'form-control'}))
    r_pwd = forms.CharField(label='确认密码', error_messages=msg,
                            widget=widgets.PasswordInput(attrs={'class': 'form-control'}))
    email = forms.EmailField(label='邮箱', error_messages=msg, widget=widgets.EmailInput(attrs={'class': 'form-control'}))
    # Per-field ("local") hook: runs after the field's own validation.
    def clean_user(self):
        # Username submitted by the client.
        val = self.cleaned_data.get('user')
        # Check the database for an existing user with this name.
        ret = UserInfo.objects.filter(user=val)
        if ret:
            # Name already taken: reject with a validation error.
            raise ValidationError('用户名已存在')
        else:
            # Name is free: accept the value.
            return val
    def clean_pwd(self):
        # Password must not consist of digits only.
        val = self.cleaned_data.get('pwd')
        if val.isdigit():
            raise ValidationError('密码不能为纯数字')
        else:
            return val
    # Form-wide ("global") hook: runs after all field hooks.
    def clean(self):
        # Compare the password and its confirmation.
        pwd = self.cleaned_data.get('pwd')
        r_pwd = self.cleaned_data.get('r_pwd')
        if pwd == r_pwd:
            return self.cleaned_data
        else:
            raise ValidationError('密码不一致')
def reg(request):
    """Registration view: renders the form on GET; validates and creates the
    user on POST. Template variables are passed via locals(), so the local
    names ``form`` and ``g_error`` are part of the template's contract."""
    if request.method == 'POST':
        # 1. Bind the submitted data to the form.
        # 2. Run the form's validation pipeline.
        form = UserForm(request.POST)
        # 3. Valid: persist the new user.
        if form.is_valid():
            # print(obj.cleaned_data)
            UserInfo.objects.create(**form.cleaned_data)
            return HttpResponse('ok')
        else:
            # 4. Invalid: hand the error messages back to the page.
            # print(obj.cleaned_data)
            # print(obj.errors)
            # NOTE(review): .get('email') returns None when the email field
            # has no error, so [0] would raise TypeError -- confirm intended.
            print(form.errors.get('email')[0])
            # errors = form.errors
            # Non-field errors raised by the form-wide clean() hook.
            g_error = form.errors.get('__all__')
            return render(request, 'reg.html', locals())
    else:
        # GET: render an unbound form.
        form = UserForm()
        return render(request, 'reg.html', locals())
| 1,602 | 812 | 46 |
2918ffbc40c0f4fe1a89654a5199fab798d6870e | 141 | py | Python | graph/6.py | miiiingi/algorithmstudy | 75eaf97e2c41d7edf32eb4a57d4d7685c9218aba | [
"MIT"
] | null | null | null | graph/6.py | miiiingi/algorithmstudy | 75eaf97e2c41d7edf32eb4a57d4d7685c9218aba | [
"MIT"
] | null | null | null | graph/6.py | miiiingi/algorithmstudy | 75eaf97e2c41d7edf32eb4a57d4d7685c9218aba | [
"MIT"
] | null | null | null | import collections
answer = solution(5, [[4, 3], [4, 2], [3, 2], [1, 2], [2, 5]])
print(answer)
| 17.625 | 62 | 0.553191 | import collections
def solution(n, results):
    """Count players whose exact rank is decidable from the match results.

    ``results`` lists ``[winner, loser]`` pairs among players 1..n.  Using
    the transitivity of "beats" (a Floyd-Warshall-style closure), a player's
    rank is fixed exactly when the outcome against every other player is
    known.  The stub previously returned None; this implements the problem
    the embedded example encodes (expected answer 2 for the sample input).
    """
    # beats[a][b]: a is known (directly or transitively) to beat b.
    beats = [[False] * (n + 1) for _ in range(n + 1)]
    for winner, loser in results:
        beats[winner][loser] = True
    # Transitive closure: a beats c whenever a beats b and b beats c.
    for mid in range(1, n + 1):
        for a in range(1, n + 1):
            for b in range(1, n + 1):
                if beats[a][mid] and beats[mid][b]:
                    beats[a][b] = True
    decided = 0
    for player in range(1, n + 1):
        known = sum(
            1 for other in range(1, n + 1)
            if beats[player][other] or beats[other][player]
        )
        if known == n - 1:
            decided += 1
    return decided
answer = solution(5, [[4, 3], [4, 2], [3, 2], [1, 2], [2, 5]])
print(answer)
| 22 | 0 | 22 |
453d9fc13b0fecccdd439c780851be3819ec080c | 180 | py | Python | PyExercises - CeV - Mundo 3/Exercises (00 - 34)/ex 25.py | PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3 | 3c02768eb720c2112ececc95be95caf2bdd98fb1 | [
"MIT"
] | null | null | null | PyExercises - CeV - Mundo 3/Exercises (00 - 34)/ex 25.py | PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3 | 3c02768eb720c2112ececc95be95caf2bdd98fb1 | [
"MIT"
] | null | null | null | PyExercises - CeV - Mundo 3/Exercises (00 - 34)/ex 25.py | PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3 | 3c02768eb720c2112ececc95be95caf2bdd98fb1 | [
"MIT"
] | null | null | null |
phrase = str(input('Type a phrase: ')).strip()
write(phrase)
def write(phrase):
    """Print ``phrase`` between two dashed rules sized to fit it."""
    border = '-' * (len(str(phrase)) + 4)
    print(border)
    print(f'  {phrase}')
    print(border)
phrase = str(input('Type a phrase: ')).strip()
write(phrase)
| 95 | 0 | 22 |
8ce26a3d6ed9cff9f7cc6c0b28b21113ae27d5c3 | 2,336 | py | Python | librarypaste/mongostore.py | yougov/librarypaste | 740fafcb260f493ca5bbd24afd59d49444c2b2ae | [
"MIT"
] | null | null | null | librarypaste/mongostore.py | yougov/librarypaste | 740fafcb260f493ca5bbd24afd59d49444c2b2ae | [
"MIT"
] | null | null | null | librarypaste/mongostore.py | yougov/librarypaste | 740fafcb260f493ca5bbd24afd59d49444c2b2ae | [
"MIT"
] | 1 | 2022-03-26T09:24:56.000Z | 2022-03-26T09:24:56.000Z | import pymongo
import gridfs
from .datastore import DataStore
| 31.146667 | 79 | 0.583904 | import pymongo
import gridfs
from .datastore import DataStore
class MongoDBDataStore(pymongo.MongoClient, DataStore):
    """Paste datastore backed by MongoDB; large raw payloads go to GridFS."""
    # Default database name; overridden by from_uri() when the URI names one.
    db_name = 'librarypaste'
    @property
    def db(self):
        # The pymongo database this store operates on.
        return self[self.db_name]
    @classmethod
    def from_uri(cls, uri):
        """Build a store from a MongoDB URI, honouring its database name."""
        store = cls(uri)
        uri_p = pymongo.uri_parser.parse_uri(uri)
        if uri_p['database']:
            store.db_name = uri_p['database']
        return store
    def _store(self, uid, content, data=None):
        """Store the given dict of content at uid. Nothing returned."""
        doc = dict(uid=uid)
        if data:
            # Raw bytes live in GridFS; only their id is embedded in the doc.
            gfs = gridfs.GridFS(self.db)
            id = gfs.put(data, encoding='utf-8')
            doc.update(data_id=id)
        doc.update(content)
        self.db.pastes.insert_one(doc)
    def _storeLog(self, nick, time, uid):
        """Adds the nick & uid to the log for a given time/order. No return."""
        query = dict(uid=uid)
        update = {'$set': dict(nick=nick, time=time)}
        # NOTE(review): Collection.update() was removed in pymongo 4;
        # update_one() is the modern equivalent -- confirm pymongo version.
        self.db.pastes.update(query, update)
    def _retrieve(self, uid):
        """Return a dict with the contents of the paste, including the raw
        data, if any, as the key 'data'. Must pass in uid, not shortid."""
        query = dict(uid=uid)
        doc = self.db.pastes.find_one(query)
        if 'data_id' in doc:
            # Resolve the GridFS reference back into the raw bytes.
            data_id = doc.pop('data_id')
            gfs = gridfs.GridFS(self.db)
            doc.update(data=gfs.get(data_id).read())
        return doc
    def _delete(self, uid):
        """Delete the paste at uid (and its GridFS payload); return the doc.

        NOTE(review): find_one_and_delete returns None when nothing matches,
        so the membership test below would raise TypeError -- confirm callers
        guarantee existence.
        """
        filter = dict(uid=uid)
        doc = self.db.pastes.find_one_and_delete(filter)
        if 'data_id' in doc:
            gfs = gridfs.GridFS(self.db)
            gfs.delete(doc['data_id'])
        return doc
    def lookup(self, nick):
        """Looks for the most recent paste by a given nick.
        Returns the uid or None"""
        query = dict(nick=nick)
        order = [('time', pymongo.DESCENDING)]
        recs = self.db.pastes.find(query).sort(order).limit(1)
        try:
            return next(recs)['uid']
        except StopIteration:
            # No pastes by this nick: fall through and return None.
            pass
    def _lookupUid(self, shortid):
        # Map a public short id to the internal uid.
        query = dict(shortid=shortid)
        rec = self.db.pastes.find_one(query)
        return rec['uid']
    def list(self):
        """Return a generator over the uid of every stored paste."""
        return (doc['uid'] for doc in self.db.pastes.find(projection=['uid']))
| 609 | 1,640 | 23 |
175ede236d5e7418476de999e58ee58fec8e1803 | 940 | py | Python | packages/pyright-internal/src/tests/samples/overload1.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/overload1.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/overload1.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests the type checker's handling of the overload decorator.
from typing import Literal, overload, Optional
from datetime import datetime, timezone, timedelta
@overload
@overload
result1: datetime = from_json_timestamp(2418049)
# This should generate an error
result2: datetime = from_json_timestamp(None)
result3: None = from_json_timestamp(None)
# This should generate an error
result4: None = from_json_timestamp(2345)
@overload
@overload
t_f1: Literal["float"] = reveal_type(func1(abs(0.0)))
| 18.431373 | 85 | 0.695745 | # This sample tests the type checker's handling of the overload decorator.
from typing import Literal, overload, Optional
from datetime import datetime, timezone, timedelta
@overload
def from_json_timestamp(ts: int) -> datetime:
    ...
@overload
def from_json_timestamp(ts: None) -> None:
    ...
def from_json_timestamp(ts: Optional[int]) -> Optional[datetime]:
    """Convert a millisecond Unix timestamp to an aware UTC datetime;
    ``None`` passes through unchanged (mirrored by the overloads above)."""
    if ts is None:
        return None
    epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
    return epoch + timedelta(milliseconds=ts)
result1: datetime = from_json_timestamp(2418049)
# This should generate an error
result2: datetime = from_json_timestamp(None)
result3: None = from_json_timestamp(None)
# This should generate an error
result4: None = from_json_timestamp(2345)
@overload
def func1(x: int) -> int:
    ...
@overload
def func1(x: float) -> float:
    ...
def func1(x):
    # Identity implementation; the overloads above let the type checker
    # narrow the return type to match the argument (int vs. float).
    return x
t_f1: Literal["float"] = reveal_type(func1(abs(0.0)))
| 278 | 0 | 134 |
d3e4234c4b5015ccb96b7564007bfce396181eb3 | 971 | py | Python | Leetcode/1501-1550/1550-three-consecutive-odds.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | null | null | null | Leetcode/1501-1550/1550-three-consecutive-odds.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | null | null | null | Leetcode/1501-1550/1550-three-consecutive-odds.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | 1 | 2019-10-06T15:46:14.000Z | 2019-10-06T15:46:14.000Z | '''
Author : MiKueen
Level : Easy
Problem Statement : Three Consecutive Odds
Given an integer array arr, return true if there are three consecutive odd numbers in the array. Otherwise, return false.
Example 1:
Input: arr = [2,6,4,1]
Output: false
Explanation: There are no three consecutive odds.
Example 2:
Input: arr = [1,2,34,3,4,5,7,23,12]
Output: true
Explanation: [5,7,23] are three consecutive odds.
Constraints:
1 <= arr.length <= 1000
1 <= arr[i] <= 1000
'''
| 24.897436 | 121 | 0.531411 | '''
Author : MiKueen
Level : Easy
Problem Statement : Three Consecutive Odds
Given an integer array arr, return true if there are three consecutive odd numbers in the array. Otherwise, return false.
Example 1:
Input: arr = [2,6,4,1]
Output: false
Explanation: There are no three consecutive odds.
Example 2:
Input: arr = [1,2,34,3,4,5,7,23,12]
Output: true
Explanation: [5,7,23] are three consecutive odds.
Constraints:
1 <= arr.length <= 1000
1 <= arr[i] <= 1000
'''
class Solution:
    def threeConsecutiveOdds(self, arr: List[int]) -> bool:
        """Return True when ``arr`` contains three consecutive odd numbers."""
        # Track the length of the current run of odd values; a run of three
        # means three consecutive odds exist.
        run = 0
        for value in arr:
            if value % 2 != 0:
                run += 1
                if run == 3:
                    return True
            else:
                run = 0
        return False
| 448 | -6 | 49 |
e2988c5d955de0faa6d4540671df14c17d63230f | 5,564 | py | Python | tests/with_bytorch/bytorch_toy_experiment.py | roman-bachmann/Rethinking-Binarized-Neural-Network-Optimization | 2c1dab8b7028eef803d7437b79f12369100becf3 | [
"Apache-2.0"
] | 12 | 2019-12-16T11:44:43.000Z | 2021-08-18T12:41:03.000Z | tests/with_bytorch/bytorch_toy_experiment.py | roman-bachmann/Rethinking-Binarized-Neural-Network-Optimization | 2c1dab8b7028eef803d7437b79f12369100becf3 | [
"Apache-2.0"
] | null | null | null | tests/with_bytorch/bytorch_toy_experiment.py | roman-bachmann/Rethinking-Binarized-Neural-Network-Optimization | 2c1dab8b7028eef803d7437b79f12369100becf3 | [
"Apache-2.0"
] | 3 | 2020-01-14T05:53:35.000Z | 2020-12-04T07:54:17.000Z | import torch as t
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as opt
import torch.utils.data as dutils
from research_seed.bytorch.binary_neural_network import (
MomentumWithThresholdBinaryOptimizer,
BinaryLinear,
)
from matplotlib import pyplot as plt
# t.manual_seed(424121)
group_a_generator = dist.Normal(0.8, 0.001)
group_b_generator = dist.Normal(0, 0.001)
group_c_generator = dist.Normal(-0.8, 0.001)
if __name__ == "__main__":
main()
| 26.245283 | 88 | 0.561826 | import torch as t
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as opt
import torch.utils.data as dutils
from research_seed.bytorch.binary_neural_network import (
MomentumWithThresholdBinaryOptimizer,
BinaryLinear,
)
from matplotlib import pyplot as plt
# t.manual_seed(424121)
group_a_generator = dist.Normal(0.8, 0.001)
group_b_generator = dist.Normal(0, 0.001)
group_c_generator = dist.Normal(-0.8, 0.001)
class ToyDataset(dutils.Dataset):
    """Minimal in-memory Dataset wrapping a list of samples."""

    def __init__(self, samples):
        # Keep a reference to the backing list; no copy is made.
        self.samples = samples

    def __getitem__(self, index):
        """Return the sample stored at *index*."""
        return self.samples[index]

    def __len__(self):
        """Number of samples held by the dataset."""
        return len(self.samples)
def get_one_hot(hot_index, total_classes):
    """Return a 1-D float tensor of length *total_classes* with a 1 at *hot_index*.

    Raises ValueError when hot_index falls outside [0, total_classes).
    """
    if not 0 <= hot_index < total_classes:
        raise ValueError("cannot go outside range of {}".format(total_classes))
    one_hot = t.zeros(total_classes)
    one_hot[hot_index] = 1
    return one_hot
def generate_data(n_samples=1024, n_features=100):
    """Build a ToyDataset with *n_samples* (tensor, label) pairs per cluster.

    Labels: 0 for cluster a, 1 for cluster b, 2 for cluster c. Samples are
    drawn a/b/c interleaved per iteration so the global RNG consumption
    order matches the original implementation.
    """
    shape = (1, n_features)
    cluster_a, cluster_b, cluster_c = [], [], []
    for _ in range(n_samples):
        cluster_a.append((group_a_generator.sample(shape), 0))
        cluster_b.append((group_b_generator.sample(shape), 1))
        cluster_c.append((group_c_generator.sample(shape), 2))
    return ToyDataset(cluster_a + cluster_b + cluster_c)
class RealNet(nn.Module):
    """Real-valued 3-layer MLP baseline (in -> 100 -> 50 -> out)."""

    def __init__(self, in_features, out_features):
        super(RealNet, self).__init__()
        # Layers are created in the same order as before so parameter
        # initialization consumes the RNG identically.
        self.fc1 = nn.Linear(in_features, 100)
        self.fc2 = nn.Linear(100, 50)
        self.fc3 = nn.Linear(50, out_features)

    def forward(self, x):
        """ReLU after the two hidden layers; raw logits out."""
        hidden = f.relu(self.fc1(x))
        hidden = f.relu(self.fc2(hidden))
        return self.fc3(hidden)
class BinaryNet(nn.Module):
    """Binarized 3-layer MLP (in -> 50 -> 25 -> out) built from BinaryLinear.

    BatchNorm layers were experimented with here but are currently disabled.
    """

    def __init__(self, in_features, out_features):
        super(BinaryNet, self).__init__()
        self.fc1 = BinaryLinear(in_features, 50)
        self.fc2 = BinaryLinear(50, 25)
        self.fc3 = BinaryLinear(25, out_features)

    def forward(self, x):
        """Hard-tanh after the two hidden layers; raw logits out."""
        hidden = f.hardtanh(self.fc1(x))
        hidden = f.hardtanh(self.fc2(hidden))
        return self.fc3(hidden)
def plot_data(dataset):
    """Scatter-plot the first two feature dimensions of every sample.

    The raw coordinate lists are also printed (debug output kept from
    the original implementation).
    """
    xs, ys = [], []
    for sample, _label in dataset:
        flat = sample.flatten()
        xs.append(flat[0].item())
        ys.append(flat[1].item())
    print(xs)
    print(ys)
    plt.scatter(xs, ys)
    plt.show()
def main():
    """Run the toy experiment 10 times and report train/test accuracy.

    Flip use_binary/use_gpu below to switch between the binarized network
    (with the momentum-with-threshold optimizer) and a real-valued SGD
    baseline.
    """
    use_gpu = False
    use_binary = True
    n_features, n_classes = 2, 3
    # Three Gaussian clusters; n_samples is the count *per cluster*.
    train = generate_data(n_samples=1024, n_features=n_features)
    test = generate_data(n_samples=100, n_features=n_features)
    train_loaded = dutils.DataLoader(train, batch_size=16, shuffle=True)
    test_loaded = dutils.DataLoader(test, batch_size=16, shuffle=True)
    # Repeat the whole experiment 10 times with a fresh network/optimizer.
    for _ in range(0, 10):
        if use_binary:
            network: nn.Module = BinaryNet(n_features, n_classes)
            loss_fn = f.multi_margin_loss
            optimizer = MomentumWithThresholdBinaryOptimizer(
                params=network.parameters(), ar=1e-3, threshold=1e-3
            )
        else:
            network: nn.Module = RealNet(n_features, n_classes)
            loss_fn = f.cross_entropy
            optimizer = opt.SGD(network.parameters(), 0.001)
        if use_gpu:
            network = network.to("cuda")
        for epoch in range(0, 10):
            # print("epoch", epoch, end=" ")
            sum_loss = 0
            total_losses = 0
            # NOTE(review): assumes the binary optimizer reports flip counts
            # for exactly 6 parameter tensors -- confirm against BinaryNet.
            total_flips = [0] * 6
            for i, data in enumerate(train_loaded, 0):
                batch, labels = data
                if use_gpu:
                    batch = batch.to("cuda")
                    labels = labels.to("cuda")
                optimizer.zero_grad()
                out = network(batch).squeeze()
                loss = loss_fn(out, labels)
                sum_loss += loss.item()
                total_losses += 1
                loss.backward()
                # for p in network.parameters():
                #     print("###################")
                #     print(p)
                #     print(p.grad)
                #     print()
                flips = optimizer.step()
                if use_binary:
                    # Accumulate per-parameter weight-flip counts for the epoch.
                    total_flips = [a + b for a, b in zip(flips, total_flips)]
            # Mean training loss for the epoch, followed by the flip counts.
            print(sum_loss / total_losses, end=" ")
            print(total_flips)
        # Evaluate accuracy on the training set.
        correct = 0
        total = 0
        with t.no_grad():
            for data in train_loaded:
                images, labels = data
                outputs = network(images).squeeze()
                _, predicted = t.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        train_accuracy = 100 * (correct / total)
        # Evaluate accuracy on the held-out test set.
        correct = 0
        total = 0
        with t.no_grad():
            for data in test_loaded:
                images, labels = data
                outputs = network(images).squeeze()
                _, predicted = t.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        test_accuracy = 100 * (correct / total)
        print(
            f"train accuracy: {train_accuracy: .3f} test accuracy: {test_accuracy: .3f}"
        )
if __name__ == "__main__":
main()
| 4,666 | 22 | 347 |
d2765a15278f795ce540757645736a00e838d983 | 1,460 | py | Python | trompace/subscriptions/__init__.py | trompamusic/ce-queries-template | cc5ae69d0e76623bfd72e9453f569f6624bf7c3b | [
"Apache-2.0"
] | 1 | 2020-06-18T15:43:18.000Z | 2020-06-18T15:43:18.000Z | trompace/subscriptions/__init__.py | trompamusic/ce-queries-template | cc5ae69d0e76623bfd72e9453f569f6624bf7c3b | [
"Apache-2.0"
] | 60 | 2019-12-17T11:08:28.000Z | 2021-03-02T16:19:41.000Z | trompace/subscriptions/__init__.py | trompamusic/trompace-client | cc5ae69d0e76623bfd72e9453f569f6624bf7c3b | [
"Apache-2.0"
] | null | null | null | import json
class StringConstant:
"""Some values in GraphQL are constants, not strings, and so they shouldn't
be encoded or have quotes put around them. Use this to represent a constant
and it won't be quoted in the query"""
class ListConstant:
"""Some values in GraphQL are constants, not strings, and so they shouldn't
be encoded or have quotes put around them. Use this to represent a list of
constants and it won't be quoted in the query"""
def BoolConstant(in_bool: bool):
""" Converts a boolean value to a constant string value."""
if in_bool:
return StringConstant('true')
else:
return StringConstant('false')
def make_parameters(**kwargs):
"""Convert mutation query parameters from dictionary to string format.
"""
encoder = json.JSONEncoder()
parts = []
for k, v in kwargs.items():
if isinstance(v, StringConstant):
value = v.value
elif isinstance(v, ListConstant):
value = v.values
else:
value = encoder.encode(v)
parts.append("{}: {}".format(k, value))
return "\n ".join(parts)
SUBSCRIPTION = '''subscription {{
{subscription}
}}'''
| 26.545455 | 80 | 0.636301 | import json
class StringConstant:
    """Some values in GraphQL are constants, not strings, and so they shouldn't
    be encoded or have quotes put around them. Use this to represent a constant
    and it won't be quoted in the query"""

    def __init__(self, value):
        # Raw constant text, emitted verbatim into the query.
        self.value = value

    def __str__(self):
        return self.value

    # repr intentionally mirrors str: both render the bare constant.
    __repr__ = __str__
class ListConstant:
    """Some values in GraphQL are constants, not strings, and so they shouldn't
    be encoded or have quotes put around them. Use this to represent a list of
    constants and it won't be quoted in the query"""

    def __init__(self, values):
        # Wrap every element so each one is rendered unquoted.
        self.values = [StringConstant(item) for item in values]
def BoolConstant(in_bool: bool):
    """ Converts a boolean value to a constant string value."""
    return StringConstant('true' if in_bool else 'false')
def make_parameters(**kwargs):
    """Convert mutation query parameters from dictionary to string format.
    """
    encoder = json.JSONEncoder()

    def render(value):
        # Constants are emitted verbatim; everything else is JSON-encoded.
        if isinstance(value, StringConstant):
            return value.value
        if isinstance(value, ListConstant):
            return value.values
        return encoder.encode(value)

    parts = ["{}: {}".format(key, render(value)) for key, value in kwargs.items()]
    return "\n        ".join(parts)
SUBSCRIPTION = '''subscription {{
{subscription}
}}'''
| 143 | 0 | 108 |
5778c501ffac90d2bab869aca329520148a4fada | 6,342 | py | Python | site/07.level2_demo_socket/pc_server/lib/opencv_lane_detection.py | FaBoPlatform/RobotCarAI | c89d3330a2beda0f253733d3252b2b035b153b6b | [
"Apache-2.0"
] | 10 | 2017-12-27T20:51:26.000Z | 2020-05-27T05:29:13.000Z | site/07.level2_demo_socket/pc_server/lib/opencv_lane_detection.py | FaBoPlatform/RobotCarAI | c89d3330a2beda0f253733d3252b2b035b153b6b | [
"Apache-2.0"
] | null | null | null | site/07.level2_demo_socket/pc_server/lib/opencv_lane_detection.py | FaBoPlatform/RobotCarAI | c89d3330a2beda0f253733d3252b2b035b153b6b | [
"Apache-2.0"
] | 3 | 2017-12-27T20:51:30.000Z | 2019-03-15T02:49:25.000Z | # coding: utf-8
# OpenCV ライン検出クラス
import cv2
import numpy as np
import time
import os
import sys
import math
from .functions import *
import platform
| 34.846154 | 143 | 0.498266 | # coding: utf-8
# OpenCV ライン検出クラス
import cv2
import numpy as np
import time
import os
import sys
import math
from .functions import *
import platform
class LaneDetection():
    """OpenCV-based lane line detector for the robot-car webcam feed."""
    # Output location used when recording webcam frames to disk.
    OUTPUT_DIR = './output'
    OUTPUT_FILENAME = 'capture.avi'
    # Class-level defaults; populated per instance by init_webcam()/webcam_capture().
    vid = None
    out = None
    cv_bgr = None

    def __init__(self, x_meter, y_meter, cols=160, rows=120):
        """Set up ROI/IPM coordinates and the pixel-to-meter conversion.

        Args:
            x_meter: real-world width covered by the frame, in meters.
            y_meter: real-world height covered by the frame, in meters.
            cols: frame width in pixels.
            rows: frame height in pixels.
        """
        self.x_meter = x_meter
        self.y_meter = y_meter
        self.cols = cols
        self.rows = rows
        ########################################
        # Region Of Interest Coordinates
        ########################################
        self.roi_vertices = calc_roi_vertices(cols, rows,
                                              # robocar camera demo_lane
                                              top_width_rate=0.80, top_height_position=0.65,
                                              bottom_width_rate=2.0, bottom_height_position=1)
        ########################################
        # Inverse Perspective Mapping Coordinates
        ########################################
        self.ipm_vertices = calc_ipm_vertices(cols, rows,
                                              # robocar camera demo_lane
                                              top_width_rate=0.80, top_height_position=0.65,
                                              bottom_width_rate=2.0, bottom_height_position=1)
        # Convert pixels to meters
        self.ym_per_pix = 1.0*self.y_meter/self.rows
        self.xm_per_pix = 1.0*self.x_meter/self.cols
        return

    def __del__(self):
        # Release capture/writer resources on garbage collection.
        if self.vid is not None:
            self.vid.release()
        if self.out is not None:
            self.out.release()
        return

    def init_webcam(self, fps=30, save=False):
        '''
        Prepare the OpenCV webcam capture.

        Raises IOError when the camera cannot be opened (unrecoverable).
        When save is True, captured frames are also written to an AVI file
        under OUTPUT_DIR.
        '''
        vid = None
        if platform.machine() == 'aarch64':
            vid = cv2.VideoCapture(1) # WebCam Jetson TX2 /dev/video1
        elif platform.machine() == 'armv7l': # armv7l
            vid = cv2.VideoCapture(0) # WebCam Raspberry Pi3 /dev/video0
        else: # amd64
            vid = cv2.VideoCapture(0) # WebCam
            #vid = cv2.VideoCapture('udp://0084121c9205:8090') # GPU docker container id
        print(vid.isOpened())
        if not vid.isOpened():
            # A failed camera open cannot be recovered from, so abort.
            raise IOError(("Couldn't open video file or webcam. If you're "
                           "trying to open a webcam, make sure you video_path is an integer!"))
        fourcc = None
        cv_version = cv2.__version__.split(".")
        if cv_version[0] == '2':
            # OpenCV 2.4 uses the legacy cv2.cv property/constant names.
            vid.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.cols)
            vid.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.rows)
            vid.set(cv2.cv.CV_CAP_PROP_FPS, fps)
            fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
        else:
            # OpenCV 3.2
            vid.set(cv2.CAP_PROP_FRAME_WIDTH, self.cols)
            vid.set(cv2.CAP_PROP_FRAME_HEIGHT, self.rows)
            vid.set(cv2.CAP_PROP_FPS, fps)
            fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        self.vid = vid
        self.save = save
        if save:
            mkdir(self.OUTPUT_DIR)
            self.out = cv2.VideoWriter(os.path.join(self.OUTPUT_DIR, self.OUTPUT_FILENAME), int(fourcc), fps, (int(self.cols), int(self.rows)))
        return

    def webcam_capture(self):
        '''
        Grab one frame from the webcam into self.cv_bgr.

        Returns False when no frame could be read, True otherwise.
        '''
        retval, self.cv_bgr = self.vid.read()
        if not retval:
            print('Done.')
            return False
        if self.save:
            # Append the frame to the AVI recording.
            self.out.write(self.cv_bgr)
        return True

    def lane_detection(self):
        '''
        Detect the lane line in the most recently captured frame.
        args:
        returns:
            tilt1_deg: tilt angle of the near segment. - is right, + is left
            tilt2_deg: tilt angle of the far segment. - is right, + is left
            angle1_deg: curve angle of the near segment. - is left, + is right
            angle2_deg: curve angle of the far segment. - is left, + is right
            curve1_r: curve radius of the near segment (m)
            curve2_r: curve radius of the far segment (m)
            meters_from_center: distance from the frame center (m)
        '''
        ########################################
        # Region Of Interest
        ########################################
        cv_bgr = to_roi(self.cv_bgr, self.roi_vertices)
        ########################################
        # Inverse Perspective Mapping
        ########################################
        cv_bgr = to_ipm(cv_bgr, self.ipm_vertices)
        ########################################
        # White color extraction
        ########################################
        cv_bgr = to_white(cv_bgr)
        ########################################
        # Binarize the image
        ########################################
        cv_bin = to_bin(cv_bgr)
        ########################################
        # Detect the lane
        ########################################
        # Run sliding windows to collect the pixel coordinates forming the line.
        line_x, line_y = sliding_windows(cv_bin)
        '''
        Calculations below are in the real-world (meter) coordinate system
        '''
        # Generate evenly spaced y coordinates.
        plot_ym = np.linspace(0, self.rows-1, self.rows)*self.ym_per_pix
        # Fit the line's second-degree polynomial and compute its coordinates.
        line_polyfit_const, \
            _pts_line = calc_line_curve(line_x*self.xm_per_pix, line_y*self.ym_per_pix, plot_ym)
        ########################################
        # Compute arc coordinates and angles.
        # Split the fitted line into top/bottom halves and compute curvature
        # radius, arc center and tilt angle from the y axis for each half.
        ########################################
        quarter_y = (np.max(plot_ym) - np.min(plot_ym))/4
        # Compute the bottom (near) half.
        y0 = np.max(plot_ym) - 2*quarter_y
        y1 = np.max(plot_ym)
        curve1_x, curve1_y, curve1_r, \
            rotate1_deg, angle1_deg, \
            tilt1_deg = calc_curve(y0, y1, line_polyfit_const)
        # Compute the top (far) half.
        quarter_y = (np.max(plot_ym) - np.min(plot_ym))/4
        y0 = np.min(plot_ym)
        y1 = np.max(plot_ym) - 2*quarter_y
        curve2_x, curve2_y, curve2_r, \
            rotate2_deg, angle2_deg, \
            tilt2_deg = calc_curve(y0, y1, line_polyfit_const)
        # Compute the distance to the center line,
        # measured at the bottom-most row of the frame.
        bottom_y = np.max(plot_ym)
        bottom_x = line_polyfit_const[0]*bottom_y**2 + line_polyfit_const[1]*bottom_y + line_polyfit_const[2]
        meters_from_center = bottom_x - (self.cols/2)*self.xm_per_pix
        return tilt1_deg, tilt2_deg, angle1_deg, angle2_deg, curve1_r, curve2_r, meters_from_center
| 1,374 | 5,441 | 23 |
13413f38b80a1ed0ac2f9de626e861c39db750d2 | 2,127 | py | Python | sync_dokumen_support/sync_method_support.py | rabicuse96/sync_dokumen_support | 5fb201beccede0c5c67a9bc7053b0c21e3fb5217 | [
"MIT"
] | null | null | null | sync_dokumen_support/sync_method_support.py | rabicuse96/sync_dokumen_support | 5fb201beccede0c5c67a9bc7053b0c21e3fb5217 | [
"MIT"
] | null | null | null | sync_dokumen_support/sync_method_support.py | rabicuse96/sync_dokumen_support | 5fb201beccede0c5c67a9bc7053b0c21e3fb5217 | [
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappeclient import FrappeClient
import json
import os
import requests
import subprocess
from frappe.utils.background_jobs import enqueue
from frappe.utils import get_site_name
from frappe.utils import flt, nowdate, add_days, cint
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
| 25.626506 | 117 | 0.685472 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappeclient import FrappeClient
import json
import os
import requests
import subprocess
from frappe.utils.background_jobs import enqueue
from frappe.utils import get_site_name
from frappe.utils import flt, nowdate, add_days, cint
@frappe.whitelist()
def update_received_qty(po_name, prec, sync_site):
    """Add a remote Purchase Receipt's received quantities to the local Sales Order.

    Finds the Sales Order synced from Purchase Order *po_name*, fetches
    Purchase Receipt *prec* from the remote site *sync_site*, and increments
    received_qty on every item row with a matching item_code.

    NOTE(review): po_name is interpolated straight into the SQL string --
    consider frappe.db.sql parameter binding to avoid injection.
    NOTE(review): remote credentials are hard-coded (administrator/admin).
    """
    data_so = frappe.db.sql(""" select so.`name` from `tabSales Order` so
        where so.sync_from_document_name = "{}" """.format(str(po_name)), as_dict = 1)
    if data_so:
        docu_so = frappe.get_doc("Sales Order", data_so[0]['name'])
        coba = FrappeClient(sync_site, "administrator", "admin")
        docu_prec = coba.get_doc("Purchase Receipt", prec)
        for d in docu_so.items:
            for i in docu_prec["items"]:
                # Match remote receipt rows to local order rows by item code.
                if d.item_code == i["item_code"]:
                    d.received_qty += i["received_qty"]
        docu_so.save()
@frappe.whitelist()
def cancel_update_received_qty(po_name, prec, sync_site):
    """Reverse update_received_qty: subtract a remote Purchase Receipt's quantities.

    Used when the remote receipt is cancelled; decrements received_qty on the
    matching Sales Order item rows by the receipt's received quantities.

    NOTE(review): po_name is interpolated straight into the SQL string --
    consider frappe.db.sql parameter binding to avoid injection.
    NOTE(review): remote credentials are hard-coded (administrator/admin).
    """
    data_so = frappe.db.sql(""" select so.`name` from `tabSales Order` so
        where so.sync_from_document_name = "{}" """.format(str(po_name)), as_dict = 1)
    if data_so:
        docu_so = frappe.get_doc("Sales Order", data_so[0]['name'])
        coba = FrappeClient(sync_site, "administrator", "admin")
        docu_prec = coba.get_doc("Purchase Receipt", prec)
        for d in docu_so.items:
            for i in docu_prec["items"]:
                # Match remote receipt rows to local order rows by item code.
                if d.item_code == i["item_code"]:
                    d.received_qty -= i["received_qty"]
        docu_so.save()
@frappe.whitelist()
def validasi_received_qty(doc, method):
    """Document-event hook: block the Sales Invoice when delivered != received.

    For every Sales Order referenced by the invoice *doc*, if the order was
    created via sync (sync_from_document_name set), every item's
    delivered_qty must equal its received_qty; otherwise frappe.throw aborts
    with an Indonesian error message.

    NOTE(review): doc.name is interpolated straight into the SQL string --
    consider frappe.db.sql parameter binding.
    """
    data_so = frappe.db.sql(""" select distinct(sinv.`sales_order`) as `so` from `tabSales Invoice Item` sinv
        where sinv.`parent` = "{0}" """.format(doc.name), as_dict =1)
    if data_so :
        for d in data_so:
            doc_so = frappe.get_doc("Sales Order", d.so)
            if doc_so.sync_from_document_name:
                for i in doc_so.items:
                    if i.delivered_qty != i.received_qty:
                        frappe.throw("Jumlah Delivered Quantity item {0} berbeda dengan dengan Received Quantity".format(i.item_code))
| 1,625 | 0 | 69 |
1166ea07b336e9cd63f3c7bb52a0665c7d05bbbe | 445 | py | Python | element/migrations/0006_auto_20200922_2036.py | FranchuFranchu/py-elemental | 7679815c8f8b8df5095c0c26c610179aa05d4ea0 | [
"MIT"
] | null | null | null | element/migrations/0006_auto_20200922_2036.py | FranchuFranchu/py-elemental | 7679815c8f8b8df5095c0c26c610179aa05d4ea0 | [
"MIT"
] | null | null | null | element/migrations/0006_auto_20200922_2036.py | FranchuFranchu/py-elemental | 7679815c8f8b8df5095c0c26c610179aa05d4ea0 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-09-22 20:36
from django.db import migrations
| 20.227273 | 47 | 0.573034 | # Generated by Django 3.0.5 on 2020-09-22 20:36
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the 'ingredients' field from both
    the Recipe and Suggestion models."""

    dependencies = [
        ('element', '0005_auto_20200922_2034'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='recipe',
            name='ingredients',
        ),
        migrations.RemoveField(
            model_name='suggestion',
            name='ingredients',
        ),
    ]
| 0 | 339 | 23 |
3603f7c3c0188c2320cdff52b05e5e85159f56c7 | 2,284 | py | Python | utils.py | AdamSpannbauer/vidstab_stitcher | 2ece76fd565452a36acc9b26ceaf399d41823910 | [
"MIT"
] | 1 | 2020-04-14T18:04:03.000Z | 2020-04-14T18:04:03.000Z | utils.py | AdamSpannbauer/vidstab_stitcher | 2ece76fd565452a36acc9b26ceaf399d41823910 | [
"MIT"
] | null | null | null | utils.py | AdamSpannbauer/vidstab_stitcher | 2ece76fd565452a36acc9b26ceaf399d41823910 | [
"MIT"
] | null | null | null | import math
import cv2
import numpy as np
import imutils
| 29.282051 | 109 | 0.530648 | import math
import cv2
import numpy as np
import imutils
def imshow_max_dim(winname, image, max_width=750):
    """Show *image* in an OpenCV window, downscaling when wider than *max_width*."""
    width = image.shape[1]
    if width > max_width:
        image = imutils.resize(image, width=max_width)
    cv2.imshow(winname, image)
def image_montage(image_list, n_col, image_shape=(300, 300)):
    """Tile the given images into a single montage with at most *n_col* columns."""
    n_images = len(image_list)
    n_col = min(n_images, n_col)
    n_row = math.ceil(n_images / n_col)
    montages = imutils.build_montages(
        image_list, image_shape=image_shape, montage_shape=(n_col, n_row)
    )
    return montages[0]
def pad_to_same_dim(im_a, im_b):
    """Zero-pad both images (bottom/right only) to a common height and width."""
    h_a, w_a = im_a.shape[:2]
    h_b, w_b = im_b.shape[:2]

    def pad_bottom_right(image, pad_y, pad_x):
        # Transparent black border (BGRA) on the bottom and right edges.
        return cv2.copyMakeBorder(image,
                                  top=0, bottom=pad_y,
                                  left=0, right=pad_x,
                                  borderType=cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0, 0])

    im_a = pad_bottom_right(im_a, max(0, h_b - h_a), max(0, w_b - w_a))
    im_b = pad_bottom_right(im_b, max(0, h_a - h_b), max(0, w_a - w_b))
    return im_a, im_b
def pad_for_join(im_a, im_b):
    """Surround both images with a transparent border large enough to join them,
    then equalize their dimensions."""
    h_a, w_a = im_a.shape[:2]
    h_b, w_b = im_b.shape[:2]
    # Border wide/tall enough that either image can be placed beside the other.
    border_size = max((w_a + w_b) // 2 + 1, (h_a + h_b) // 2 + 1)

    def pad_all_sides(image):
        return cv2.copyMakeBorder(image,
                                  top=border_size, bottom=border_size,
                                  left=border_size, right=border_size,
                                  borderType=cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0, 0])

    return pad_to_same_dim(pad_all_sides(im_a), pad_all_sides(im_b))
def layer_overlay(im_a, im_b):
    """Overlay *im_a* on *im_b*: wherever im_a's alpha channel is 0, show im_b.

    Note: im_a is modified in place and also returned.
    """
    transparent = np.where(im_a[:, :, 3] == 0)
    im_a[transparent] = im_b[transparent]
    return im_a
| 2,107 | 0 | 115 |
12b0ca97f42a334646e051fe965f9ceec9b363c1 | 2,737 | py | Python | mhdata/merge/binary/metadata/monster_metadata.py | nikibobi/MHWorldData | 78b5a4dc10ef532d5bad7359ef0b098f99104782 | [
"MIT"
] | null | null | null | mhdata/merge/binary/metadata/monster_metadata.py | nikibobi/MHWorldData | 78b5a4dc10ef532d5bad7359ef0b098f99104782 | [
"MIT"
] | null | null | null | mhdata/merge/binary/metadata/monster_metadata.py | nikibobi/MHWorldData | 78b5a4dc10ef532d5bad7359ef0b098f99104782 | [
"MIT"
] | null | null | null | import json
from os.path import dirname, abspath, join
from mhdata.io.csv import read_csv
class MonsterMetadata:
"""
Attempt to load the various types of mappings that monsters have
Monsters have an internal numerical id used in some schemas, and varying string ids
used in other schemas. Note that string key sare inconsistent, so some magic is usually involved.
Therefore we load:
- Map keyed by name_en that gives the string keys for names and for hunter notes (can differ)
- Map keyed by internal id that connects to name_en (used for hitzones/statuses/etc)
""" | 34.64557 | 101 | 0.649616 | import json
from os.path import dirname, abspath, join
from mhdata.io.csv import read_csv
class MonsterMetaEntry:
    """Record tying a monster's English name to its numeric and string keys."""

    def __init__(self, name, id, id_alt, key_name, key_description):
        self.name = name                        # English name (name_en)
        self.id = id                            # primary internal numeric id
        self.id_alt = id_alt                    # list of alternate numeric ids
        self.key_name = key_name                # string key for names
        self.key_description = key_description  # string key for hunter notes


class MonsterMetadata:
    """
    Attempt to load the various types of mappings that monsters have
    Monsters have an internal numerical id used in some schemas, and varying string ids
    used in other schemas. Note that string key sare inconsistent, so some magic is usually involved.
    Therefore we load:
    - Map keyed by name_en that gives the string keys for names and for hunter notes (can differ)
    - Map keyed by internal id that connects to name_en (used for hitzones/statuses/etc)
    """

    def __init__(self):
        """Load monster metadata from the bundled JSON and CSV files."""
        id_alt_keys = ['id_alt', 'id_alt2']
        this_dir = dirname(abspath(__file__))
        # Load data from the quest data dump project.
        # Note that since the key is a FILEPATH it can't be joined with the rest of the data.
        # (Fixed: the file handle used to be leaked via json.load(open(...)).)
        with open(this_dir + '/metadata_files/MonsterData.json') as data_file:
            self.monster_data_ext = json.load(data_file)
        monster_keys_csv = read_csv(this_dir + '/metadata_files/monster_map.csv')
        monster_entries = [MonsterMetaEntry(
            name=r['name_en'].strip(),
            id=int(r['id'], 16),  # ids are stored as hex strings
            id_alt=[int(r[key], 16) for key in id_alt_keys if r[key]],
            key_name=r['key_name'],
            key_description=r['key_description']
        ) for r in monster_keys_csv]
        self._map = {entry.name: entry for entry in monster_entries}
        self._map_by_id = {entry.id: entry for entry in monster_entries}
        # Add alt keys. Note that they only go one way and cannot be reverse associated.
        for entry in monster_entries:
            for alt_id in entry.id_alt:
                self._map_by_id[alt_id] = entry

    def has_monster(self, monster_name):
        """Return True if a monster with this English name is known."""
        return monster_name in self._map

    def has_monster_id(self, monster_id):
        """Return True if this internal (or alternate) id is known."""
        return monster_id in self._map_by_id

    def by_id(self, id) -> MonsterMetaEntry:
        """Look up an entry by internal/alternate id; raises KeyError when absent."""
        return self._map_by_id[id]

    def by_name(self, name) -> MonsterMetaEntry:
        """Look up an entry by English name; raises KeyError when absent."""
        return self._map[name]

    def entries(self):
        """Yield every known MonsterMetaEntry."""
        yield from self._map.values()

    def get_ext(self, path_key):
        """Return extended monster data for the given file-path key, or None."""
        return self.monster_data_ext['Monsters'].get(path_key)

    def get_part(self, path_key, part_id):
        """Return the string id of part *part_id* for the monster at *path_key*.

        Returns None when the monster is unknown and 'OUT OF BOUNDS' when the
        part index is invalid.
        """
        data_ext = self.get_ext(path_key)
        if not data_ext:
            return None
        try:
            return data_ext['PartStringIds'][part_id]
        except IndexError:
            return 'OUT OF BOUNDS'
0a305a756eba1e6bc543c2b30188c534a1106077 | 4,567 | py | Python | gocdapi/pipeline.py | joaogbcravo/gocdapi | 6c2f9a3dea01a705ab396802c6539272f4efe5e3 | [
"MIT"
] | 8 | 2015-01-23T12:50:30.000Z | 2020-01-21T11:00:19.000Z | gocdapi/pipeline.py | joaogbcravo/gocdapi | 6c2f9a3dea01a705ab396802c6539272f4efe5e3 | [
"MIT"
] | 7 | 2015-01-27T23:17:05.000Z | 2016-06-08T15:27:07.000Z | gocdapi/pipeline.py | joaogbcravo/gocdapi | 6c2f9a3dea01a705ab396802c6539272f4efe5e3 | [
"MIT"
] | 2 | 2015-11-23T18:33:24.000Z | 2020-07-15T09:01:34.000Z | """
Module for gocdapi Pipeline class
"""
import xml.etree.ElementTree as ET
from gocdapi.gobase import GoBase
from gocdapi.stage import Stage
from gocdapi.utils.config_xml import ConfigXML
class Pipeline(GoBase):
    """
    Class to hold Go Server Pipeline information
    """

    def __init__(self, go_server, data):
        """Inits Pipeline objects.

        Args:
            go_server (Go): A Go object which this agent belongs to.
            data (str): A json string representing the pipeline configuration
        """
        self.stages = []
        super(self.__class__, self).__init__(go_server, data=data)

    def __str__(self):
        """Returns a pretty representation of the object

        Returns:
            str: representation of the object
        """
        return 'Pipeline @ {}'.format(self.go_server.baseurl)

    def schedule(self):
        """Triggers a new pipeline instance with the latest revision of all materials.

        POSTs to go/api/pipelines/PIPELINE_NAME/schedule
        """
        self.do_post(self.build_url('schedule'))

    def release_lock(self):
        """Releases a lock on the pipeline.

        POSTs to go/api/pipelines/PIPELINE_NAME/releaseLock
        """
        self.do_post(self.build_url('releaseLock'))

    def pause(self, pause_cause):
        """Pauses the pipeline, recording the given reason.

        POSTs to go/api/pipelines/PIPELINE_NAME/pause

        Args:
            pause_cause (str): reason to pause the pipeline
        """
        self.do_post(self.build_url('pause'),
                     data={'pauseCause': pause_cause},
                     headers={'Confirm': True})

    def unpause(self):
        """Unpauses the pipeline.

        POSTs to go/api/pipelines/PIPELINE_NAME/unpause
        """
        self.do_post(self.build_url('unpause'), headers={'Confirm': True})

    def status(self):
        """Fetches the pipeline status.

        Queries go/api/pipelines/PIPELINE_NAME/status

        Returns:
            dict: JSON-derived dict with 'paused', 'locked' and 'schedulable' flags.
        """
        return self.get_json_data(self.build_url('status'))

    def is_paused(self):
        """Returns True when the pipeline is currently paused (fresh status query)."""
        return self.status()["paused"]

    def is_locked(self):
        """Returns True when the pipeline is currently locked (fresh status query)."""
        return self.status()["locked"]

    def is_schedulable(self):
        """Returns True when the pipeline can be scheduled (fresh status query)."""
        return self.status()["schedulable"]

    def history(self, offset=0):
        """Lists pipeline history.

        Queries go/api/pipelines/PIPELINE_NAME/history/OFFSET

        Args:
            offset (int): how many instances to skip

        Returns:
            str: JSON representing pipeline history
        """
        return self.get_json_data(self.build_url('history/{}'.format(offset)))

    def get_config_xml(self, to_string=False):
        """Extracts this pipeline's section from the current server configuration.

        The full configuration is obtained via go_server.admin.poll_configuration().

        Args:
            to_string (bool): Stringify the config XML before returning it

        Returns:
            The pipeline's XML element, or its string form when to_string is True
        """
        _, xml_data = self.go_server.admin.poll_configuration()
        pipeline_xml = ConfigXML(xml_data).get_pipeline(self.name)
        if to_string:
            return ET.tostring(pipeline_xml)
        return pipeline_xml

    def _poll(self):
        """Populates attributes from self._data (bunch pattern), rebuilds the
        stage list from the configuration and sets the pipeline url.
        """
        self.__dict__.update(self._data)
        self.set_self_url('go/api/pipelines/%s/' % self.name)
        self.stages = [Stage(self.go_server, self, stage_data)
                       for stage_data in self._data['stages']]
| 29.275641 | 112 | 0.621852 | """
Module for gocdapi Pipeline class
"""
import xml.etree.ElementTree as ET
from gocdapi.gobase import GoBase
from gocdapi.stage import Stage
from gocdapi.utils.config_xml import ConfigXML
class Pipeline(GoBase):
    """
    Class to hold Go Server Pipeline information
    """
    def __init__(self, go_server, data):
        """Inits Pipeline objects.
        Args:
            go_server (Go): A Go object which this agent belongs to.
            data (str): A json string representing the pipeline configuration
        """
        # Stage objects for this pipeline; populated by _poll().
        self.stages = []
        super(self.__class__, self).__init__(go_server, data=data)
    def __str__(self):
        """Returns a pretty representation of the object
        Returns:
            str: representation of the object
        """
        return 'Pipeline @ %s' % self.go_server.baseurl
    def schedule(self):
        """Triggers a new instance of the pipeline with the latest revision of all materials
        Will do a POST request to go/api/pipelines/PIPELINE_NAME/schedule
        """
        url = self.build_url('schedule')
        self.do_post(url)
    def release_lock(self):
        """Releases a lock on the pipeline
        Will do a POST request to go/api/pipelines/PIPELINE_NAME/releaseLock
        """
        url = self.build_url('releaseLock')
        self.do_post(url)
    def pause(self, pause_cause):
        """Pauses the pipeline with the given reason.
        Will do a POST request to go/api/pipelines/PIPELINE_NAME/pause
        Args:
            pause_cause (str): reason to pause the pipeline
        """
        url = self.build_url('pause')
        self.do_post(url, data={'pauseCause': pause_cause}, headers={'Confirm': True})
    def unpause(self):
        """Unpauses the pipeline.
        Will do a POST request to go/api/pipelines/PIPELINE_NAME/unpause
        """
        url = self.build_url('unpause')
        self.do_post(url, headers={'Confirm': True})
    def status(self):
        """Gets information about status of pipeline.
        Will do a POST request to go/api/pipelines/PIPELINE_NAME/status
        Return:
            dict: dict based in a JSON containing status information about paused, locked & schedulable.
        """
        # NOTE(review): docstring says POST but get_json_data suggests a GET -- confirm.
        url = self.build_url('status')
        return self.get_json_data(url)
    def is_paused(self):
        """Check if pipeline is paused
        Uses status method to get updated data.
        Returns:
            bool: True if paused
        """
        return self.status()["paused"]
    def is_locked(self):
        """Check if pipeline is locked
        Uses status method to get updated data.
        Returns:
            bool: True if locked
        """
        return self.status()["locked"]
    def is_schedulable(self):
        """Check if pipeline is schedulable
        Uses status method to get updated data.
        Returns:
            bool: True if schedulable
        """
        return self.status()["schedulable"]
    def history(self, offset=0):
        """List Pipeline history.
        Will do a POST request to go/api/pipelines/PIPELINE_NAME/history/OFFSET
        Args:
            offset (int): how many instances to skip
        Returns:
            str: JSON representing pipeline history
        """
        url = self.build_url('history/%s' % offset)
        return self.get_json_data(url)
    def get_config_xml(self, to_string=False):
        """Get Configuration XML.
        Will do a GET request to go/api/admin/config/current.xml to retrieve the current pipeline
        configuration.
        Args:
            to_string (bool): Stringify the config XML before returning it
        Returns:
            str: XML string data
        """
        # poll_configuration() returns a pair; only the XML payload is used here.
        _, config_xml_data = self.go_server.admin.poll_configuration()
        config_xml = ConfigXML(config_xml_data)
        pipeline_xml = config_xml.get_pipeline(self.name)
        return ET.tostring(pipeline_xml) if to_string else pipeline_xml
    def _poll(self):
        """Will create and define the attributes of the pipeline.
        Uses _data attribute populated by inherited methods, updating object attributes using the bunch pattern.
        Save stages of pipeline found in the configuration in a container.
        Also sets the pipeline url.
        """
        # Bunch pattern: expose every JSON field as an instance attribute.
        self.__dict__.update(self._data)
        self.set_self_url('go/api/pipelines/%s/' % self.name)
        self.stages = []
        for item in self._data['stages']:
            stage = Stage(self.go_server, self, item)
            self.stages.append(stage)
| 0 | 0 | 0 |
5d318299b2078509381b332915b7a02451448681 | 434 | py | Python | Object-Oriented Programming/Python/python_assignments_set1/sol11.py | neeladripal/bcse-lab | 915d2f535ae95a062438fc85980419646a3951ad | [
"MIT"
] | null | null | null | Object-Oriented Programming/Python/python_assignments_set1/sol11.py | neeladripal/bcse-lab | 915d2f535ae95a062438fc85980419646a3951ad | [
"MIT"
] | null | null | null | Object-Oriented Programming/Python/python_assignments_set1/sol11.py | neeladripal/bcse-lab | 915d2f535ae95a062438fc85980419646a3951ad | [
"MIT"
] | 1 | 2021-08-06T14:39:53.000Z | 2021-08-06T14:39:53.000Z | print ("Pythagorean Triplets with smaller side upto 10 -->")
# form : (m^2 - n^2, 2*m*n, m^2 + n^2)
# generate all (m, n) pairs such that m^2 - n^2 <= 10
# if we take (m > n), for m >= 6, m^2 - n^2 will always be greater than 10
# so m ranges from 1 to 5 and n ranges from 1 to m-1
pythTriplets = [(m*m - n*n, 2*m*n, m*m + n*n) for (m,n) in [(x, y) for x in range (1, 6) for y in range (1, x)] if m*m - n*n <= 10]
print (pythTriplets) | 62 | 131 | 0.587558 | print ("Pythagorean Triplets with smaller side upto 10 -->")
# Pythagorean triples are generated with Euclid's formula:
#   (m^2 - n^2, 2*m*n, m^2 + n^2) for integers m > n >= 1
# The smaller *side* of a triple can be either m^2 - n^2 or 2*m*n, so we
# filter on the minimum of the triple rather than on m^2 - n^2 alone
# (the original condition wrongly dropped e.g. (15, 8, 17), whose smaller
# side is 8). Scanning m in 1..5 is still sufficient for a bound of 10:
#   2*m*n <= 10 forces m <= 5, and m^2 - n^2 >= 2*m - 1 > 10 for m >= 6.
pythTriplets = [triple
                for triple in ((m * m - n * n, 2 * m * n, m * m + n * n)
                               for m in range(1, 6) for n in range(1, m))
                if min(triple) <= 10]
print(pythTriplets)
49bc76ea7e052914e845ae77607e572541c6bb25 | 1,777 | py | Python | lazy_script/lazy.py | felipefinhane/dotfiles | 460af61025dc0ea3ac0f646f819121ee9cc7c664 | [
"MIT"
] | null | null | null | lazy_script/lazy.py | felipefinhane/dotfiles | 460af61025dc0ea3ac0f646f819121ee9cc7c664 | [
"MIT"
] | null | null | null | lazy_script/lazy.py | felipefinhane/dotfiles | 460af61025dc0ea3ac0f646f819121ee9cc7c664 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#github.com/intrackeable/dotfiles
#Set a random wallpaper and change gaps level
import random
import os
import subprocess | 32.907407 | 87 | 0.586944 | #!/usr/bin/env python3
#github.com/intrackeable/dotfiles
#Set a random wallpaper and change gaps level
import random
import os
import subprocess
class Ricer:
    """Set a random wallpaper with feh and adjust i3 gap sizes."""

    def __init__(self, wm_path, wallpaper_path, gaps):
        # Path to the i3 config file, the wallpaper folder, and the
        # desired inner/outer gap size.
        self.wm_path = wm_path
        self.wallpaper_path = wallpaper_path
        self.gaps = gaps

    def search_images(self):
        """Return image filenames in the wallpaper folder; abort if none."""
        found = [
            name for name in os.listdir(self.wallpaper_path)
            if name.endswith(('png', 'jpeg', 'jpg'))
        ]
        if not found:
            subprocess.call('notify-send "No files found in this folder"', shell=True)
            exit()
        return found

    def random_wallpaper(self):
        """Return the full path of one randomly chosen wallpaper."""
        return self.wallpaper_path + random.choice(self.search_images())

    def set_wallpaper(self):
        """Apply a random wallpaper via feh."""
        subprocess.call('feh --bg-fill {}'.format(self.random_wallpaper()), shell=True)

    def restart_wm(self):
        """Restart i3 so config changes take effect."""
        subprocess.call('i3-msg restart', shell=True)

    def replace_gaps(self):
        """Rewrite the 'gaps inner/outer' lines in the i3 config, then restart."""
        if not (0 <= self.gaps <= 30):
            subprocess.call('notify-send "Unexpected level of gaps"', shell=True)
            exit()
        lines = open(self.wm_path).read().splitlines()
        rewritten = []
        for line in lines:
            # A line starts with at most one of the two prefixes,
            # so elif is equivalent to the two independent checks.
            if line.startswith('gaps inner'):
                line = 'gaps inner {}'.format(self.gaps)
            elif line.startswith('gaps outer'):
                line = 'gaps outer {}'.format(self.gaps)
            rewritten.append(line)
        with open(self.wm_path, 'w') as handle:
            handle.writelines('\n'.join(rewritten))
        self.restart_wm()
e0e877aac80e0b4e85435a857590ac5083b7e7a5 | 28 | py | Python | src/fractal/world/time/step/__init__.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | [
"MIT"
] | null | null | null | src/fractal/world/time/step/__init__.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | [
"MIT"
] | null | null | null | src/fractal/world/time/step/__init__.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | [
"MIT"
] | null | null | null | from ._step import Timestep
| 14 | 27 | 0.821429 | from ._step import Timestep
| 0 | 0 | 0 |
8f67b5b8eabdaa67557f943060ecbfeabde16744 | 30,350 | py | Python | src/automata_learning_with_policybank/aqrm.py | logic-and-learning/AdvisoRL | 3bbd741e681e6ea72562fec142d54e9d781d097d | [
"MIT"
] | 4 | 2021-02-04T17:33:07.000Z | 2022-01-24T10:29:39.000Z | src/automata_learning_with_policybank/aqrm.py | logic-and-learning/AdvisoRL | 3bbd741e681e6ea72562fec142d54e9d781d097d | [
"MIT"
] | null | null | null | src/automata_learning_with_policybank/aqrm.py | logic-and-learning/AdvisoRL | 3bbd741e681e6ea72562fec142d54e9d781d097d | [
"MIT"
] | null | null | null | import numpy as np
import random, time
import tensorflow as tf
from automata_learning_utils import al_utils
from worlds.game import *
from automata_learning_with_policybank.policy_bank_dqn import PolicyBankDQN
from common.schedules import LinearSchedule
from common.replay_buffer import create_experience_replay_buffer
from automata_learning_with_policybank.Traces import Traces
from tester_policybank.tester import TesterPolicyBank as Tester
import qrm
import shutil
import os
import subprocess
import csv
#import pdb
######## compare rm_learned & rm_true to find conflicting experiences
######## check is_rm_learned in run_aqrm_task and break all operations if rewards don't match
######## make new tester tester_current with past and new experience
######## pass tester_current.get_reward_machines as arguments to decompose_reward_machines
######## make new tester
######## make copies of pertinent reward machines to some file path
######## write new experiment file sifting through each of the reward machines
def run_aqrm_task(sess, epsilon, environment_rm_file, learned_rm_file, policy_bank, tester_true, tester_learned, curriculum, replay_buffer, beta_schedule, show_print, is_rm_learned, currentstep, previous_testing_reward, q):
    """
    Run one training episode.

    The agent acts in an environment rewarded by the ground-truth reward
    machine (held by `tester_true`), while the policy bank is trained against
    the currently hypothesised reward machine (held by `tester_learned`).

    - learned_rm_file: path towards the RM machine to solve on this episode
      (also used to build the task parameters).
    - environment_rm_file: path of the environment ("true") reward machine
      underlying the execution.
    - is_rm_learned: 1 once an automaton has been learned from traces,
      0 while the default hypothesis machine is still in use.
    - currentstep, previous_testing_reward, q: state carried across calls
      (step counter, last reported test reward, tabular Q-values used only
      by the dead softmax branch below).

    Returns (all_events, training_reward, step_count, is_conflicting,
    testing_reward, is_test_result, q).  `is_conflicting` is 1 when exactly
    one of the two machines considers the final RM state terminal (and always
    1 while no automaton has been learned yet).
    """
    # Initializing parameters and the game
    learning_params = tester_learned.learning_params
    testing_params = tester_learned.testing_params
    """
    here, tester holds all the machines. we would like to dynamically update the machines every so often.
    an option might be to read it every time a new machine is learnt
    """
    reward_machines = [tester_learned.get_hypothesis_machine()]
    task_params = tester_learned.get_task_params(learned_rm_file) # rm_files redundant here unless in water world (in which case it provides the map files based on the task)
    rm_true = tester_true.get_reward_machines()[0] # add one more input n to track tasks at hand, replace 0 with n
    rm_learned = tester_learned.get_hypothesis_machine()
    task = Game(task_params)
    actions = task.get_actions()
    ok = 1  # NOTE(review): always 1, so the softmax-over-q branch below is dead code
    num_features = len(task.get_features())
    num_steps = learning_params.max_timesteps_per_task
    training_reward = 0
    is_conflicting=1 #by default add traces
    testing_reward = None #initialize
    # Getting the initial state of the environment and the reward machine
    s1, s1_features = task.get_state_and_features()
    u1 = rm_learned.get_initial_state()    # state in the hypothesis machine
    u1_true = rm_true.get_initial_state()  # state in the ground-truth machine
    has_been = [0,0]
    alpha = 0.8   # NOTE(review): alpha/gamma/w only feed the commented-out tabular Q update
    gamma = 0.99
    w = 0
    # Starting interaction with the environment
    if show_print: print("Executing", num_steps)
    all_events = []
    sy_s = [[]]   # NOTE(review): sy_s/a_s are unused debug leftovers (see commented lines below)
    a_s = []
    a=0
    for t in range(num_steps):
        currentstep += 1
        s = np.where(s1_features==1)[0][0]  # index of the active one-hot feature
        # sy = s%11+1
        # sx = (s-sy+1)/11+1
        # sy_s.append([sx,sy])
        # a_s.append(a)
        # Choosing an action to perform (epsilon-greedy with a fixed 0.15
        # exploration rate; the `epsilon` argument is not used here)
        if random.random() < 0.15:
            a = random.choice(actions)
        else:
            #IG: current problem: there is no machine so a default behavior is to stop the exploration. We would, however, like to explore (randomly if necessary).
            # how to accomplish that?
            #if using suggestions in comments on line 33, replace 0 with n
            if ok:
                a = policy_bank.get_best_action(0, u1, s1_features.reshape((1,num_features)))
            else:
                # Dead branch (ok is always 1): Boltzmann action selection
                # over the tabular q values.
                pr = np.zeros([4,1])
                pr_sum = 0
                pr_select = np.zeros([5,1])
                for a in actions:
                    pr_sum += np.exp(q[s][u1_true][a])
                for a in actions:
                    pr[a] = np.exp(q[s][u1_true][a])/pr_sum
                pr_select[0] = 0
                pr_select[1] = pr[0]
                pr_select[2] = pr[0]+pr[1]
                pr_select[3] = pr[0]+pr[1]+pr[2]
                pr_select[4] = 1
                randn = random.random()
                a_selected = -1
                for a in actions:
                    if randn >= pr_select[a] and randn <= pr_select[a+1]:
                        a_selected = a
                        break
                a = a_selected
        # updating the curriculum
        curriculum.add_step()
        # Executing the action
        if tester_learned.game_type=="trafficworld":
            # In trafficworld the triggered propositions depend on the chosen action.
            events = task.get_true_propositions_action(a)
            task.execute_action(a)
            a = task.get_last_action() # due to MDP slip
        else:
            task.execute_action(a)
            a = task.get_last_action() # due to MDP slip
            events = task.get_true_propositions()
        s2, s2_features = task.get_state_and_features()
        s_new = np.where(s2_features==1)[0][0]
        u2 = rm_learned.get_next_state(u1, events)
        u2_true = rm_true.get_next_state(u1_true,events)
        # The environment reward always comes from the ground-truth machine.
        reward = rm_true.get_reward(u1_true,u2_true,s1,a,s2)
        # q[s][u1_true][a] = (1 - alpha) * q[s][u1_true][a] + alpha * (reward + gamma * np.amax(q[s_new][u2_true]))
        sy = s%9            # NOTE(review): sy/sx/synew/sxnew/a1 are unused debug leftovers
        sx = (s-sy)/9
        synew = s_new % 9
        sxnew = (s_new - synew) / 9
        a1=a
        if (events == "f"):
            events   # no-op (debugger breakpoint hook)
        all_events.append(events)
        if reward>0:
            reward   # no-op (debugger breakpoint hook)
        training_reward += reward
        # Getting rewards and next states for each reward machine
        rewards, next_states = [],[]
        rewards_hyp, next_states_hyp = [],[]
        j_rewards, j_next_states = rm_true.get_rewards_and_next_states(s1, a, s2, events)
        rewards.append(j_rewards)
        next_states.append(j_next_states)
        j_rewards_hyp, j_next_states_hyp = rm_learned.get_rewards_and_next_states(s1, a, s2, events)
        rewards_hyp.append(j_rewards_hyp)
        next_states_hyp.append(j_next_states_hyp)
        # Mapping rewards and next states to specific policies in the policy bank
        # (training uses the hypothesis machine's rewards, not the true ones)
        rewards_hyp = policy_bank.select_rewards(rewards_hyp)
        next_policies = policy_bank.select_next_policies(next_states_hyp)
        # Adding this experience to the experience replay buffer
        replay_buffer.add(s1_features, a, s2_features, rewards_hyp, next_policies)
        # Learning
        if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.train_freq == 0:
            if learning_params.prioritized_replay:
                experience = replay_buffer.sample(learning_params.batch_size, beta=beta_schedule.value(curriculum.get_current_step()))
                S1, A, S2, Rs, NPs, weights, batch_idxes = experience
            else:
                S1, A, S2, Rs, NPs = replay_buffer.sample(learning_params.batch_size)
                weights, batch_idxes = None, None
            abs_td_errors = policy_bank.learn(S1, A, S2, Rs, NPs, weights, has_been) # returns the absolute td_error
            if learning_params.prioritized_replay:
                new_priorities = abs_td_errors + learning_params.prioritized_replay_eps
                replay_buffer.update_priorities(batch_idxes, new_priorities)
        # Updating the target network
        if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.target_network_update_freq == 0:
            policy_bank.update_target_network()
        # Printing
        if show_print and (t+1) % learning_params.print_freq == 0:
            print("Step:", t+1, "\tTotal reward:", training_reward)
        # Periodic greedy evaluation (delegated to the tester).
        if testing_params.test and curriculum.get_current_step() % testing_params.test_freq==0:
            testing_reward = tester_learned.run_test(curriculum.get_current_step(), sess, run_aqrm_test, rm_learned, rm_true, is_rm_learned, q, policy_bank, num_features)
        if is_rm_learned==0:
            # No automaton learned yet: only the true machine decides termination.
            if task.is_env_game_over() or rm_true.is_terminal_state(u2_true):
                # Restarting the game
                task = Game(task_params)
                if curriculum.stop_task(t):
                    break
                s2, s2_features = task.get_state_and_features()
                u2_true = rm_true.get_initial_state()
        else:
            # Automaton in use: either machine reaching a terminal state resets the game.
            if task.is_env_game_over() or rm_learned.is_terminal_state(u2) or rm_true.is_terminal_state(u2_true):
                # Restarting the game
                task = Game(task_params)
                if curriculum.stop_task(t):
                    break
                s2, s2_features = task.get_state_and_features()
                u2_true = rm_true.get_initial_state()
                u2 = rm_learned.get_initial_state()
        # checking the steps time-out
        if curriculum.stop_learning():
            break
        # Moving to the next state
        s1, s1_features, u1 = s2, s2_features, u2
        u1_true = u2_true
    if rm_true.is_terminal_state(u2_true):
        checker = rm_learned.is_terminal_state(u2)   # NOTE(review): checker is unused
    # Conflict detection: the episode is non-conflicting only when a machine
    # has been learned and both machines agree on the final state's
    # terminality (both terminal or both non-terminal).
    if (is_rm_learned) and (not rm_learned.is_terminal_state(u2)) and (not rm_true.is_terminal_state(u2_true)):
        is_conflicting = 0
    elif is_rm_learned and (rm_learned.is_terminal_state(u2) and rm_true.is_terminal_state(u2_true)):
        is_conflicting = 0
    else:
        is_conflicting = 1
    step_count=t
    if testing_reward is None:
        # No test ran this episode; report the previous test reward instead.
        is_test_result = 0
        testing_reward = previous_testing_reward
    else:
        is_test_result = 1
    if show_print: print("Done! Total reward:", training_reward)
    return all_events, training_reward, step_count, is_conflicting, testing_reward, is_test_result, q
| 40.359043 | 351 | 0.588534 | import numpy as np
import random, time
import tensorflow as tf
from automata_learning_utils import al_utils
from worlds.game import *
from automata_learning_with_policybank.policy_bank_dqn import PolicyBankDQN
from common.schedules import LinearSchedule
from common.replay_buffer import create_experience_replay_buffer
from automata_learning_with_policybank.Traces import Traces
from tester_policybank.tester import TesterPolicyBank as Tester
import qrm
import shutil
import os
import subprocess
import csv
#import pdb
######## compare rm_learned & rm_true to find conflicting experiences
######## check is_rm_learned in run_aqrm_task and break all operations if rewards don't match
######## make new tester tester_current with past and new experience
######## pass tester_current.get_reward_machines as arguments to decompose_reward_machines
######## make new tester
######## make copies of pertinent reward machines to some file path
######## write new experiment file sifting through each of the reward machines
def run_aqrm_task(sess, epsilon, environment_rm_file, learned_rm_file, policy_bank, tester_true, tester_learned, curriculum, replay_buffer, beta_schedule, show_print, is_rm_learned, currentstep, previous_testing_reward, q):
    """
    Run one training episode.

    The agent acts in an environment rewarded by the ground-truth reward
    machine (held by `tester_true`), while the policy bank is trained against
    the currently hypothesised reward machine (held by `tester_learned`).

    - learned_rm_file: path towards the RM machine to solve on this episode
      (also used to build the task parameters).
    - environment_rm_file: path of the environment ("true") reward machine
      underlying the execution.
    - is_rm_learned: 1 once an automaton has been learned from traces,
      0 while the default hypothesis machine is still in use.
    - currentstep, previous_testing_reward, q: state carried across calls
      (step counter, last reported test reward, tabular Q-values used only
      by the dead softmax branch below).

    Returns (all_events, training_reward, step_count, is_conflicting,
    testing_reward, is_test_result, q).  `is_conflicting` is 1 when exactly
    one of the two machines considers the final RM state terminal (and always
    1 while no automaton has been learned yet).
    """
    # Initializing parameters and the game
    learning_params = tester_learned.learning_params
    testing_params = tester_learned.testing_params
    """
    here, tester holds all the machines. we would like to dynamically update the machines every so often.
    an option might be to read it every time a new machine is learnt
    """
    reward_machines = [tester_learned.get_hypothesis_machine()]
    task_params = tester_learned.get_task_params(learned_rm_file) # rm_files redundant here unless in water world (in which case it provides the map files based on the task)
    rm_true = tester_true.get_reward_machines()[0] # add one more input n to track tasks at hand, replace 0 with n
    rm_learned = tester_learned.get_hypothesis_machine()
    task = Game(task_params)
    actions = task.get_actions()
    ok = 1  # NOTE(review): always 1, so the softmax-over-q branch below is dead code
    num_features = len(task.get_features())
    num_steps = learning_params.max_timesteps_per_task
    training_reward = 0
    is_conflicting=1 #by default add traces
    testing_reward = None #initialize
    # Getting the initial state of the environment and the reward machine
    s1, s1_features = task.get_state_and_features()
    u1 = rm_learned.get_initial_state()    # state in the hypothesis machine
    u1_true = rm_true.get_initial_state()  # state in the ground-truth machine
    has_been = [0,0]
    alpha = 0.8   # NOTE(review): alpha/gamma/w only feed the commented-out tabular Q update
    gamma = 0.99
    w = 0
    # Starting interaction with the environment
    if show_print: print("Executing", num_steps)
    all_events = []
    sy_s = [[]]   # NOTE(review): sy_s/a_s are unused debug leftovers (see commented lines below)
    a_s = []
    a=0
    for t in range(num_steps):
        currentstep += 1
        s = np.where(s1_features==1)[0][0]  # index of the active one-hot feature
        # sy = s%11+1
        # sx = (s-sy+1)/11+1
        # sy_s.append([sx,sy])
        # a_s.append(a)
        # Choosing an action to perform (epsilon-greedy with a fixed 0.15
        # exploration rate; the `epsilon` argument is not used here)
        if random.random() < 0.15:
            a = random.choice(actions)
        else:
            #IG: current problem: there is no machine so a default behavior is to stop the exploration. We would, however, like to explore (randomly if necessary).
            # how to accomplish that?
            #if using suggestions in comments on line 33, replace 0 with n
            if ok:
                a = policy_bank.get_best_action(0, u1, s1_features.reshape((1,num_features)))
            else:
                # Dead branch (ok is always 1): Boltzmann action selection
                # over the tabular q values.
                pr = np.zeros([4,1])
                pr_sum = 0
                pr_select = np.zeros([5,1])
                for a in actions:
                    pr_sum += np.exp(q[s][u1_true][a])
                for a in actions:
                    pr[a] = np.exp(q[s][u1_true][a])/pr_sum
                pr_select[0] = 0
                pr_select[1] = pr[0]
                pr_select[2] = pr[0]+pr[1]
                pr_select[3] = pr[0]+pr[1]+pr[2]
                pr_select[4] = 1
                randn = random.random()
                a_selected = -1
                for a in actions:
                    if randn >= pr_select[a] and randn <= pr_select[a+1]:
                        a_selected = a
                        break
                a = a_selected
        # updating the curriculum
        curriculum.add_step()
        # Executing the action
        if tester_learned.game_type=="trafficworld":
            # In trafficworld the triggered propositions depend on the chosen action.
            events = task.get_true_propositions_action(a)
            task.execute_action(a)
            a = task.get_last_action() # due to MDP slip
        else:
            task.execute_action(a)
            a = task.get_last_action() # due to MDP slip
            events = task.get_true_propositions()
        s2, s2_features = task.get_state_and_features()
        s_new = np.where(s2_features==1)[0][0]
        u2 = rm_learned.get_next_state(u1, events)
        u2_true = rm_true.get_next_state(u1_true,events)
        # The environment reward always comes from the ground-truth machine.
        reward = rm_true.get_reward(u1_true,u2_true,s1,a,s2)
        # q[s][u1_true][a] = (1 - alpha) * q[s][u1_true][a] + alpha * (reward + gamma * np.amax(q[s_new][u2_true]))
        sy = s%9            # NOTE(review): sy/sx/synew/sxnew/a1 are unused debug leftovers
        sx = (s-sy)/9
        synew = s_new % 9
        sxnew = (s_new - synew) / 9
        a1=a
        if (events == "f"):
            events   # no-op (debugger breakpoint hook)
        all_events.append(events)
        if reward>0:
            reward   # no-op (debugger breakpoint hook)
        training_reward += reward
        # Getting rewards and next states for each reward machine
        rewards, next_states = [],[]
        rewards_hyp, next_states_hyp = [],[]
        j_rewards, j_next_states = rm_true.get_rewards_and_next_states(s1, a, s2, events)
        rewards.append(j_rewards)
        next_states.append(j_next_states)
        j_rewards_hyp, j_next_states_hyp = rm_learned.get_rewards_and_next_states(s1, a, s2, events)
        rewards_hyp.append(j_rewards_hyp)
        next_states_hyp.append(j_next_states_hyp)
        # Mapping rewards and next states to specific policies in the policy bank
        # (training uses the hypothesis machine's rewards, not the true ones)
        rewards_hyp = policy_bank.select_rewards(rewards_hyp)
        next_policies = policy_bank.select_next_policies(next_states_hyp)
        # Adding this experience to the experience replay buffer
        replay_buffer.add(s1_features, a, s2_features, rewards_hyp, next_policies)
        # Learning
        if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.train_freq == 0:
            if learning_params.prioritized_replay:
                experience = replay_buffer.sample(learning_params.batch_size, beta=beta_schedule.value(curriculum.get_current_step()))
                S1, A, S2, Rs, NPs, weights, batch_idxes = experience
            else:
                S1, A, S2, Rs, NPs = replay_buffer.sample(learning_params.batch_size)
                weights, batch_idxes = None, None
            abs_td_errors = policy_bank.learn(S1, A, S2, Rs, NPs, weights, has_been) # returns the absolute td_error
            if learning_params.prioritized_replay:
                new_priorities = abs_td_errors + learning_params.prioritized_replay_eps
                replay_buffer.update_priorities(batch_idxes, new_priorities)
        # Updating the target network
        if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.target_network_update_freq == 0:
            policy_bank.update_target_network()
        # Printing
        if show_print and (t+1) % learning_params.print_freq == 0:
            print("Step:", t+1, "\tTotal reward:", training_reward)
        # Periodic greedy evaluation (delegated to the tester).
        if testing_params.test and curriculum.get_current_step() % testing_params.test_freq==0:
            testing_reward = tester_learned.run_test(curriculum.get_current_step(), sess, run_aqrm_test, rm_learned, rm_true, is_rm_learned, q, policy_bank, num_features)
        if is_rm_learned==0:
            # No automaton learned yet: only the true machine decides termination.
            if task.is_env_game_over() or rm_true.is_terminal_state(u2_true):
                # Restarting the game
                task = Game(task_params)
                if curriculum.stop_task(t):
                    break
                s2, s2_features = task.get_state_and_features()
                u2_true = rm_true.get_initial_state()
        else:
            # Automaton in use: either machine reaching a terminal state resets the game.
            if task.is_env_game_over() or rm_learned.is_terminal_state(u2) or rm_true.is_terminal_state(u2_true):
                # Restarting the game
                task = Game(task_params)
                if curriculum.stop_task(t):
                    break
                s2, s2_features = task.get_state_and_features()
                u2_true = rm_true.get_initial_state()
                u2 = rm_learned.get_initial_state()
        # checking the steps time-out
        if curriculum.stop_learning():
            break
        # Moving to the next state
        s1, s1_features, u1 = s2, s2_features, u2
        u1_true = u2_true
    if rm_true.is_terminal_state(u2_true):
        checker = rm_learned.is_terminal_state(u2)   # NOTE(review): checker is unused
    # Conflict detection: the episode is non-conflicting only when a machine
    # has been learned and both machines agree on the final state's
    # terminality (both terminal or both non-terminal).
    if (is_rm_learned) and (not rm_learned.is_terminal_state(u2)) and (not rm_true.is_terminal_state(u2_true)):
        is_conflicting = 0
    elif is_rm_learned and (rm_learned.is_terminal_state(u2) and rm_true.is_terminal_state(u2_true)):
        is_conflicting = 0
    else:
        is_conflicting = 1
    step_count=t
    if testing_reward is None:
        # No test ran this episode; report the previous test reward instead.
        is_test_result = 0
        testing_reward = previous_testing_reward
    else:
        is_test_result = 1
    if show_print: print("Done! Total reward:", training_reward)
    return all_events, training_reward, step_count, is_conflicting, testing_reward, is_test_result, q
def run_aqrm_test(sess, reward_machines, task_params, rm, rm_true, is_learned, q, learning_params, testing_params, optimal, policy_bank, num_features):
    """
    Run one greedy evaluation episode and report success.

    Follows the policy bank's best action at every step (subject to the
    environment's own action slip) while tracking the current state of both
    the hypothesis machine `rm` and the ground-truth machine `rm_true`.

    Returns 1 if the true machine ended in a terminal state with a positive
    last reward, else 0.  NOTE(review): the trailing `return r_total` is
    unreachable, so the discounted return accumulated in r_total is never
    actually reported.
    """
    # Initializing parameters
    task = Game(task_params)
    s1, s1_features = task.get_state_and_features()
    u1 = rm.get_initial_state()
    u1_true = rm_true.get_initial_state()
    alpha = 0.8   # NOTE(review): alpha/gamma/w only feed the commented-out Q update below
    gamma = 0.99
    w = 0
    ok = 1        # NOTE(review): always 1, so the softmax branch below is dead code
    has_been = [0,0]
    # Starting interaction with the environment
    r_total = 0
    for t in range(testing_params.num_steps):
        # Choosing an action to perform (always greedy w.r.t. the policy bank)
        actions = task.get_actions()
        s = np.where(s1_features==1)[0][0]  # index of the active one-hot feature
        if ok:
            a = policy_bank.get_best_action(0, u1, s1_features.reshape((1,num_features)))
        else:
            # Dead branch: Boltzmann action selection over the tabular q values.
            pr = np.zeros([4,1])
            pr_sum = 0
            pr_select = np.zeros([5,1])
            for a in actions:
                pr_sum += np.exp(q[s][u1_true][a])
            for a in actions:
                pr[a] = np.exp(q[s][u1_true][a])/pr_sum
            pr_select[0] = 0
            pr_select[1] = pr[0]
            pr_select[2] = pr[0]+pr[1]
            pr_select[3] = pr[0]+pr[1]+pr[2]
            pr_select[4] = 1
            randn = random.random()
            a_selected = -1
            for a in actions:
                if randn >= pr_select[a] and randn <= pr_select[a+1]:
                    a_selected = a
                    break
            a = a_selected
        # Executing the action
        if task_params.game_type=="trafficworld":
            # In trafficworld the triggered propositions depend on the chosen action.
            event = task.get_true_propositions_action(a)
            task.execute_action(a)
            a = task.get_last_action() # due to MDP slip
        else:
            task.execute_action(a)
            a = task.get_last_action() # due to MDP slip
            event = task.get_true_propositions()
        s2, s2_features = task.get_state_and_features()
        u2 = rm.get_next_state(u1, event)
        u2_true = rm_true.get_next_state(u1_true, event)
        # Rewards are always judged by the ground-truth machine.
        r = rm_true.get_reward(u1_true,u2_true,s1,a,s2)
        s_new = np.where(s2_features==1)[0][0]
        # q[s][u1_true][a] = (1 - alpha) * q[s][u1_true][a] + alpha * (r + gamma * np.amax(q[s_new][u2_true]))
        if (event == "f"):
            event   # no-op (debugger breakpoint hook)
        r_total += r * learning_params.gamma**t # used in original graphing framework
        # Restarting the environment (Game Over)
        if is_learned==0:
            # Without a learned automaton only the true machine ends the episode.
            if task.is_env_game_over() or rm_true.is_terminal_state(u2_true):
                break
        else:
            if task.is_env_game_over() or rm.is_terminal_state(u2):
                break
        # Moving to the next state
        s1, s1_features, u1 = s2, s2_features, u2
        u1_true = u2_true
    if rm_true.is_terminal_state(u2_true) and r>0:
        return 1
    else:
        return 0
    return r_total   # NOTE(review): unreachable — both branches above already return
def _remove_files_from_folder(relative_path):
dirname = os.path.abspath(os.path.dirname(__file__))
parent_folder = os.path.normpath(os.path.join(dirname, relative_path))
if os.path.isdir(parent_folder):
for filename in os.listdir(parent_folder):
absPath = os.path.join(parent_folder, filename)
subprocess.run(["rm", absPath])
else:
print("There is no directory {}".format(parent_folder))
def run_aqrm_experiments(alg_name, tester, tester_learned, curriculum, num_times, show_print, show_plots, is_SAT):
learning_params = tester_learned.learning_params
testing_params = tester_learned.testing_params
# just in case, delete all temporary files
dirname = os.path.abspath(os.path.dirname(__file__))
_remove_files_from_folder("../automata_learning_utils/data")
# Running the tasks 'num_times'
time_init = time.time()
plot_dict = dict()
rewards_plot = list()
new_traces = Traces(set(), set())
for t in range(num_times):
# Setting the random seed to 't'
random.seed(t)
sess = tf.Session()
open('./automata_learning_utils/data/data.txt','w').close
open('./automata_learning_utils/data/automaton.txt','w').close
# Reseting default values
curriculum.restart()
# Creating the experience replay buffer
replay_buffer, beta_schedule = create_experience_replay_buffer(learning_params.buffer_size, learning_params.prioritized_replay, learning_params.prioritized_replay_alpha, learning_params.prioritized_replay_beta0, curriculum.total_steps if learning_params.prioritized_replay_beta_iters is None else learning_params.prioritized_replay_beta_iters)
hm_file = './automata_learning/hypothesis_machine.txt'
shutil.copy(hm_file,'./automata_learning_utils/data/rm.txt') #######
# Creating policy bank
task_aux = Game(tester.get_task_params(curriculum.get_current_task()))
num_features = len(task_aux.get_features())
num_actions = len(task_aux.get_actions())
q = np.zeros([1681,8,4])
num_episodes = 0
total = 0
learned = 0
step = 0
enter_loop = 1
num_conflicting_since_learn = 0
update_rm = 0
refreshed = 0
testing_step = 0
hypothesis_machine = tester.get_hypothesis_machine()
tester_learned.update_hypothesis_machine()
policy_bank = PolicyBankDQN(sess, num_actions, num_features, learning_params, hypothesis_machine)
# Task loop
automata_history = []
rewards = list()
episodes = list()
steps = list()
testing_reward = 0 #initializes value
all_traces = Traces(set(),set())
epsilon = 0.3
tt=t+1
print("run index:", +tt)
while not curriculum.stop_learning():
num_episodes += 1
if show_print: print("Current step:", curriculum.get_current_step(), "from", curriculum.total_steps)
rm_file_truth = '../experiments/craft/reward_machines/t1.txt' #set file path at beginning
###### is ^ redundant?
# update hypothesis machine for every iteration of run_aqrm_task (in using two experiment files)
# Running 'task_rm_id' for one episode
hm_file_update = './automata_learning_utils/data/rm.txt'
if learned==0:
rm_file_learned = hm_file
if update_rm:
tf.reset_default_graph()
sess = tf.Session()
policy_bank = PolicyBankDQN(sess, num_actions, num_features, learning_params, hypothesis_machine)
update_rm = 0
refreshed = 1
tester_learned.update_hypothesis_machine_file('./automata_learning/hypothesis_machine.txt')
tester_learned.update_hypothesis_machine()
all_traces = Traces(set(),set())
num_conflicting_since_learn = 0
# q = np.zeros([1681,4,4])
enter_loop = 1
elif update_rm:
rm_file_learned = hm_file_update
tf.reset_default_graph()
sess = tf.Session()
# Reseting default values
#####curriculum.restart()
# Creating the experience replay buffer
replay_buffer, beta_schedule = create_experience_replay_buffer(learning_params.buffer_size,
learning_params.prioritized_replay,
learning_params.prioritized_replay_alpha,
learning_params.prioritized_replay_beta0,
curriculum.total_steps if learning_params.prioritized_replay_beta_iters is None else learning_params.prioritized_replay_beta_iters)
# Creating policy bank
task_aux = Game(tester.get_task_params(curriculum.get_current_task()))
num_features = len(task_aux.get_features())
num_actions = len(task_aux.get_actions())
rm_learned = tester_learned.get_hypothesis_machine() # used to be rm_learned = tester_learned.get_reward_machines()[0]
if len(rm_learned.U)<=15:
print("number of states:" + str(len(rm_learned.U)))
#policy_bank = qrm.policy_bank_dqn.PolicyBankDQN(sess, num_actions, num_features, learning_params, tester_current.get_reward_machines())
policy_bank = PolicyBankDQN(sess, num_actions, num_features, learning_params, rm_learned)
else:
print("number of states:" + str(len(rm_learned.U)))
tf.reset_default_graph()
sess = tf.Session()
policy_bank = PolicyBankDQN(sess, num_actions, num_features, learning_params, hypothesis_machine)
update_rm = 0
refreshed = 1
tester_learned.update_hypothesis_machine_file('./automata_learning/hypothesis_machine.txt')
tester_learned.update_hypothesis_machine()
all_traces = Traces(set(), set())
num_conflicting_since_learn = 0
q = np.zeros([1681, 4, 4])
enter_loop = 1
learned = 0
update_rm = 0
else:
pass
automata_history.append(rm_file_learned) #####fix this
epsilon = epsilon*0.99
all_events, found_reward, stepcount, conflicting, testing_reward, is_test, q = run_aqrm_task(sess, epsilon, rm_file_truth, rm_file_learned, policy_bank, tester, tester_learned, curriculum, replay_buffer, beta_schedule, show_print, learned, step, testing_reward, q)
#set up traces; we remove anything foreign to our ground truth formula
if tester.game_type=="officeworld":
while 'h' in all_events:
all_events.remove('h')
elif tester.game_type=="trafficworld":
while 'f' in all_events:
all_events.remove('f')
while 'g' in all_events:
all_events.remove('g')
elif tester.game_type=="craftworld":
while 'd' in all_events:
all_events.remove('d')
while 'g' in all_events:
all_events.remove('g')
while 'h' in all_events:
all_events.remove('h')
while '' in all_events:
all_events.remove('')
if (conflicting==1 or refreshed==1):
all_traces.add_trace(all_events, found_reward, learned)
if (num_episodes%100==0):
print("run index:", +tt)
toprint = "Total training reward at "+str(step)+": "+str(total)
print(toprint)
if num_episodes>5000:
num_episodes
total += found_reward
step += stepcount
num_conflicting_since_learn += conflicting
rewards.append(found_reward)
episodes.append(num_episodes)
steps.append(step)
if is_test:
testing_step += testing_params.test_freq
if testing_step in plot_dict:
plot_dict[testing_step].append(testing_reward)
else:
plot_dict[testing_step] = [testing_reward]
if learned==1:
# if len(rm_learned.U)>learning_params.relearn_period:
# learned = 0
# update_rm = 1
if num_episodes%learning_params.relearn_period==0 and (num_conflicting_since_learn>0):
enter_loop = 1
if conflicting==1:
new_traces.add_trace(all_events, found_reward, learned)
if (len(all_traces.positive)>=learning_params.enter_loop) and enter_loop:
positive = set()
negative = set()
if learned==0:
if len(all_traces.positive)>0:
for i in list(all_traces.positive):
if all_traces.symbol_to_trace(i) not in positive:
positive.add(all_traces.symbol_to_trace(i))
if len(all_traces.negative)>0:
for i in list(all_traces.negative):
if all_traces.symbol_to_trace(i) not in negative:
negative.add(all_traces.symbol_to_trace(i))
else:
if len(new_traces.positive)>0:
for i in list(new_traces.positive):
if new_traces.symbol_to_trace(i) not in positive:
positive.add(new_traces.symbol_to_trace(i))
if len(new_traces.negative)>0 and len(all_traces.negative):
for i in list(new_traces.negative):
if new_traces.symbol_to_trace(i) not in negative:
negative.add(new_traces.symbol_to_trace(i))
positive_new = set() ## to get rid of redundant prefixes
negative_new = set()
if not learned:
for ptrace in positive:
new_trace = list()
previous_prefix = 100 #arbitrary
for prefix in ptrace:
if prefix != previous_prefix:
new_trace.append(prefix)
previous_prefix = prefix
positive_new.add(tuple(new_trace))
for ntrace in negative:
new_trace = list()
previous_prefix = 100 #arbitrary
for prefix in ntrace:
if prefix != previous_prefix:
new_trace.append(prefix)
previous_prefix = prefix
negative_new.add(tuple(new_trace))
if tester.game_type=="trafficworld":
negative_to_store = set(random.sample(negative_new, 50))
else:
negative_to_store = negative_new
positive_to_store = positive_new
negative_new = negative_to_store
positive_new = positive_to_store
negative = set()
positive = set()
else:
for ptrace in positive:
new_trace = list()
for prefix in ptrace:
new_trace.append(prefix)
positive_to_store.add(tuple(new_trace))
positive_new = positive_to_store
negative_new = negative_to_store
for ntrace in negative:
new_trace = list()
for prefix in ntrace:
new_trace.append(prefix)
negative_to_store.add(tuple(new_trace))
positive_new = positive_to_store
negative_new = negative_to_store
traces_numerical = Traces(positive_new, negative_new)
traces_file = './automata_learning_utils/data/data.txt'
traces_numerical.export_traces(traces_file)
if learned == 1:
shutil.copy('./automata_learning_utils/data/rm.txt', '../experiments/craft/use_past/t2.txt')
# else:
# shutil.copy('./automata_learning/hypothesis_machine.txt', '../experiments/craft/use_past/t2.txt')
automaton_visualization_filename = al_utils.learn_automaton(traces_file,show_plots,is_SAT)
if show_plots:
subprocess.run(["xdot",automaton_visualization_filename])
# t2 is previous, t1 is new
hm_file_update = './automata_learning_utils/data/rm.txt'
# if learned==1:
# shutil.copy(hm_file_update, '../experiments/craft/use_past/t2.txt')
# else:
# shutil.copy('./automata_learning/hypothesis_machine.txt', '../experiments/craft/use_past/t2.txt')
all_traces.rm_trace_to_symbol(hm_file_update)
all_traces.fix_rmfiles(hm_file_update)
if learned == 0:
shutil.copy('./automata_learning_utils/data/rm.txt',
'../experiments/craft/use_past/t2.txt')
# the learning should happen here
tester_learned.update_hypothesis_machine_file(hm_file_update) ## NOTE WHICH TESTER IS USED
tester_learned.update_hypothesis_machine()
print("learning")
parent_path = os.path.abspath("../experiments/craft/use_past/")
os.makedirs(parent_path, exist_ok=True)
shutil.copy(hm_file_update, '../experiments/craft/use_past/t1.txt')
current_and_previous_rms = '../experiments/craft/tests/use_previous_experience.txt'
tester_current = Tester(learning_params,testing_params,current_and_previous_rms)
learned = 1
enter_loop = 0
num_conflicting_since_learn = 0
update_rm = 1
#if (len(all_traces.positive)+len(all_traces.negative))>100:
# all_traces = Traces(set(),set())
if num_episodes%learning_params.relearn_period==0:
new_traces = Traces(set(), set())
tf.reset_default_graph()
sess.close()
# Backing up the results
print('Finished iteration ',t)
# Showing results
prc_25 = list()
prc_50 = list()
prc_75 = list()
# Buffers for plots
current_step = list()
current_25 = list()
current_50 = list()
current_75 = list()
steps_plot = list()
for step in plot_dict.keys():
if len(current_step) < 10:
current_25.append(np.percentile(np.array(plot_dict[step]),25))
current_50.append(np.percentile(np.array(plot_dict[step]),50))
current_75.append(np.percentile(np.array(plot_dict[step]),75))
current_step.append(sum(plot_dict[step])/len(plot_dict[step]))
else:
current_step.pop(0)
current_25.pop(0)
current_50.pop(0)
current_75.pop(0)
current_25.append(np.percentile(np.array(plot_dict[step]),25))
current_50.append(np.percentile(np.array(plot_dict[step]),50))
current_75.append(np.percentile(np.array(plot_dict[step]),75))
current_step.append(sum(plot_dict[step])/len(plot_dict[step]))
rewards_plot.append(sum(plot_dict[step])/len(plot_dict[step]))
prc_25.append(sum(current_25)/len(current_25))
prc_50.append(sum(current_50)/len(current_50))
prc_75.append(sum(current_75)/len(current_75))
steps_plot.append(step)
time_elapsed = time.clock() - time_start
tester.plot_performance(steps_plot,prc_25,prc_50,prc_75)
tester.plot_this(steps_plot,rewards_plot)
if alg_name=="jirp":
if is_SAT==1:
algorithm_name = "jirpsat"
else:
algorithm_name = "jirprpni"
else:
algorithm_name = alg_name
for character in tester.world.tasks[0]:
if str.isdigit(character):
task_id = character
filename = ("../plotdata/") + (tester.game_type) + ("") + (task_id) + ("") + (
algorithm_name) + ".csv"
with open(filename, 'w') as f:
wr = csv.writer(f)
wr.writerows(list(plot_dict.values()))
avg_filename = ("../plotdata/") + ("avgreward_") + (tester.game_type) + ("") + (task_id) + ("") + (
algorithm_name) + ".txt"
with open(avg_filename, 'w') as f:
f.write("%s\n" % str(sum(rewards_plot) / len(rewards_plot)))
for item in rewards_plot:
f.write("%s\n" % item)
print('')
print('')
print('')
print('Time taken:')
print(time_elapsed)
| 20,473 | 0 | 69 |
0c6c0b17edd21f1c58cf9fb21359f299640e8fad | 792 | py | Python | test/models/model2.py | titulebolide/variometer | 7e5fbacdb9c403d11dd01abc6f5e20db4b922756 | [
"MIT"
] | null | null | null | test/models/model2.py | titulebolide/variometer | 7e5fbacdb9c403d11dd01abc6f5e20db4b922756 | [
"MIT"
] | null | null | null | test/models/model2.py | titulebolide/variometer | 7e5fbacdb9c403d11dd01abc6f5e20db4b922756 | [
"MIT"
] | null | null | null | import numpy as np
# Maps state-component names to row indices of the Kalman-filter state
# vector X: row 0 ("z") is the position-like component (integrated over dt
# from row 1 in the transition model), row 1 ("vz") is its rate.
data_index = {
    "z" : 0,
    "vz" : 1
}
| 18 | 82 | 0.393939 | import numpy as np
def model(td, dt):
    """Build an EKF model for barometric altitude tracking.

    The state is X = [[z], [vz]] (altitude-like position and its rate); the
    measurement is absolute pressure from the isothermal barometric formula.

    :param td: telemetry source; only its ``p_capt`` attribute (captured
        pressure) is read, and only by the returned ``get_U_Z``.
    :param dt: integration time step used by the transition model.
    :return: tuple ``(f, F, h, H, X, P, Q, R, get_U_Z)`` of model functions,
        initial state, initial covariance and noise matrices.
    """
    def f(X, U):
        # Transition: constant-velocity integration of the altitude.
        return np.array([
            [X[0, 0] + dt * X[1, 0]],
            [X[1, 0]],
        ])

    def F(X, U):
        # Jacobian of f with respect to the state.
        return np.array([
            [1, dt],
            [0, 1],
        ])

    def h(X):
        # Measurement: pressure at altitude z, isothermal atmosphere
        # (P0=101325 Pa, M=0.02897 kg/mol, g=9.81, R=8.314, T=288 K).
        return np.array([
            [101325 * np.exp(-0.02897 * 9.81 * X[0, 0] / 8.314 / 288)]
        ])

    def H(X):
        # Jacobian of h: d(pressure)/dz for the first component, 0 for vz.
        return np.array([
            [101325 * -0.02897 * 9.81 / 8.314 / 288
             * np.exp(-0.02897 * 9.81 * X[0, 0] / 8.314 / 288), 0]
        ])

    # Initial state guess and (co)variances expressed as squared sigmas.
    X = np.array([
        [1000],
        [0],
    ])
    P = np.array([
        [10, 0],
        [0, 2],
    ]) ** 2
    Q = np.array([
        [10, 0],
        [0, 2],
    ]) ** 2
    R = np.array([
        [1000]
    ]) ** 2

    def get_U_Z(td):
        # No control input; the measurement is the captured pressure reading.
        return np.array([[]]), np.array([[td.p_capt]])

    return f, F, h, H, X, P, Q, R, get_U_Z
# Maps state-component names to row indices of the Kalman-filter state
# vector X built by model(): row 0 ("z") is the position-like component,
# row 1 ("vz") is its rate (row 0 integrates row 1 over dt).
data_index = {
    "z" : 0,
    "vz" : 1
}
| 706 | 0 | 23 |
6d254d14aaaf62445723ade6a9a6073a4ff4d61f | 3,594 | py | Python | data_models/data_enums.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | 2 | 2019-12-12T20:57:37.000Z | 2021-09-29T02:59:19.000Z | data_models/data_enums.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | null | null | null | data_models/data_enums.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | null | null | null | from enum import Enum
class DataType(Enum):
"""
All variants of possible data types in a chart.
"""
Integer = 0
IntegerSum = 1
Float = 2
FloatSum = 3
String = 4
DateTime = 5
Date = 6
Time = 7
Daytime = 8
Count = 9
Weekday = 10
Day = 11
Month = 12
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
def string_to_data_type(data_type_string):
"""
Converts a given string in a object of this enum.
:param data_type_string: The string that represents an enum value.
:type data_type_string: str
:return: The data type corresponding to the string.
:rtype: DataType
"""
data_type_string = data_type_string.lower()
if "integer" == data_type_string:
return DataType.Integer
elif "float" == data_type_string:
return DataType.Float
elif "varchar" in data_type_string:
return DataType.String
elif "datetime" == data_type_string:
return DataType.DateTime
elif "time" == data_type_string:
return DataType.Time
elif "date" == data_type_string:
return DataType.Date
elif "weekday" == data_type_string:
return DataType.Weekday
elif "month" == data_type_string:
return DataType.Month
elif "daytime" == data_type_string:
return DataType.Daytime
elif "string" == data_type_string:
return DataType.String
elif "day" == data_type_string:
return DataType.Day
@staticmethod
def int_to_weekday(weekday):
"""
Converts a given int to a weekday.
:param weekday: The int of the weekday. 0 is Monday.
:type weekday: int
:return: The string for the weekday.
:rtype: str
"""
if not 0 <= weekday <= 6:
return ""
return ["Mon.", "Tue.", "Wed.", "Thu.", "Fri.", "Sat.", "Sun."][weekday]
@staticmethod
def int_to_month(month):
"""
Converts a given int to a month.
:param month: The int of the weekday. 1 is January.
:type month: int
:return: The string for the month.
:rtype: str
"""
if not 1 <= month <= 12:
return ""
return ["Jan.", "Feb.", "Mar.", "Apr.", "May", "Jun.", "Jul.", "Aug.", "Sept.", "Oct.", "Nov.", "Dec."][month - 1]
@staticmethod
def int_to_daytime(daytime):
"""
Converts a given int to a daytime.
:param daytime: The int of the weekday. 0 is Morning.
:type daytime: int
:return: The string for the daytime.
:rtype: str
"""
if not 0 <= daytime <= 3:
return ""
return ["Night", "Morning", "Afternoon", "Evening"][daytime]
class DataSelectionState(Enum):
"""
Which state has a data object.
"""
Nothing = 0,
Selected = 1,
Highlighted = 2
| 28.078125 | 122 | 0.581247 | from enum import Enum
class DataType(Enum):
    """Enumerates the value types a chart column can carry."""
    Integer = 0
    IntegerSum = 1
    Float = 2
    FloatSum = 3
    String = 4
    DateTime = 5
    Date = 6
    Time = 7
    Daytime = 8
    Count = 9
    Weekday = 10
    Day = 11
    Month = 12

    @staticmethod
    def is_number(data_type):
        """True for any numeric type (plain, summed, or counted)."""
        return data_type in (DataType.Integer, DataType.Float, DataType.IntegerSum,
                             DataType.FloatSum, DataType.Count)

    @staticmethod
    def is_sum(data_type):
        """True for aggregated (sum) numeric types."""
        return data_type in (DataType.IntegerSum, DataType.FloatSum)

    @staticmethod
    def is_datetime(data_type):
        """True for date/time-valued types."""
        return data_type in (DataType.DateTime, DataType.Date, DataType.Time, DataType.Day)

    @staticmethod
    def is_string(data_type):
        """True for string-like (categorical) types."""
        return data_type in (DataType.String, DataType.Weekday, DataType.Daytime, DataType.Month)

    @staticmethod
    def string_to_data_type(data_type_string):
        """
        Convert a type name (case-insensitive) into a member of this enum.

        Any name containing "varchar" maps to String; unrecognized names
        yield None.
        :param data_type_string: The string that represents an enum value.
        :type data_type_string: str
        :return: The matching data type, or None if unrecognized.
        :rtype: DataType
        """
        name = data_type_string.lower()
        if "varchar" in name:
            return DataType.String
        return {
            "integer": DataType.Integer,
            "float": DataType.Float,
            "datetime": DataType.DateTime,
            "time": DataType.Time,
            "date": DataType.Date,
            "weekday": DataType.Weekday,
            "month": DataType.Month,
            "daytime": DataType.Daytime,
            "string": DataType.String,
            "day": DataType.Day,
        }.get(name)

    @staticmethod
    def int_to_weekday(weekday):
        """
        Map 0..6 (Monday first) to an abbreviated weekday name.

        :param weekday: The int of the weekday. 0 is Monday.
        :type weekday: int
        :return: The abbreviation, or "" when out of range.
        :rtype: str
        """
        names = ("Mon.", "Tue.", "Wed.", "Thu.", "Fri.", "Sat.", "Sun.")
        return names[weekday] if 0 <= weekday <= 6 else ""

    @staticmethod
    def int_to_month(month):
        """
        Map 1..12 (January first) to an abbreviated month name.

        :param month: The int of the month. 1 is January.
        :type month: int
        :return: The abbreviation, or "" when out of range.
        :rtype: str
        """
        names = ("Jan.", "Feb.", "Mar.", "Apr.", "May", "Jun.", "Jul.", "Aug.",
                 "Sept.", "Oct.", "Nov.", "Dec.")
        return names[month - 1] if 1 <= month <= 12 else ""

    @staticmethod
    def int_to_daytime(daytime):
        """
        Map 0..3 to a daytime label: Night, Morning, Afternoon, Evening.

        :param daytime: The int of the daytime. 0 is Night.
        :type daytime: int
        :return: The label, or "" when out of range.
        :rtype: str
        """
        names = ("Night", "Morning", "Afternoon", "Evening")
        return names[daytime] if 0 <= daytime <= 3 else ""
class DataSelectionState(Enum):
    """
    Selection state of a data object.
    """
    # NOTE(review): the trailing commas make the values of Nothing/Selected
    # the tuples (0,) and (1,) while Highlighted is the plain int 2 -- likely
    # unintended, but member identity comparisons still work; confirm no
    # caller relies on .value before changing this.
    Nothing = 0,
    Selected = 1,
    Highlighted = 2
class ListAction(Enum):
    """Kind of change applied to a list: removal, addition, or move."""
    # NOTE(review): the trailing commas make every member's value a 1-tuple
    # ((0,), (1,), (2,)) rather than a plain int -- confirm no caller relies
    # on .value before changing this.
    removed = 0,
    added = 1,
    moved = 2,
| 393 | 49 | 127 |
63762b461f6a641887b8d0a81ab619e80c271638 | 2,836 | py | Python | insta_clone/migrations/0001_initial.py | olamijinadebayo/instagram | 12b6aaafce26bdddf6ea92cc5b6ee1ea2c954898 | [
"MIT"
] | null | null | null | insta_clone/migrations/0001_initial.py | olamijinadebayo/instagram | 12b6aaafce26bdddf6ea92cc5b6ee1ea2c954898 | [
"MIT"
] | null | null | null | insta_clone/migrations/0001_initial.py | olamijinadebayo/instagram | 12b6aaafce26bdddf6ea92cc5b6ee1ea2c954898 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-15 15:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 41.705882 | 133 | 0.593089 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-15 15:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the insta_clone app (auto-generated by Django 1.11,
    see file header). Creates Comment, Image, Profile and
    WelcomeEmailRecipients, then wires Comment's FKs to Image and the user
    model (added last because Image is created after Comment)."""

    initial = True

    # Image/Profile/Comment reference the configured user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image_name', models.CharField(max_length=60)),
                ('post_image', models.ImageField(blank=True, upload_to='images/')),
                ('image_caption', models.TextField()),
                ('comments', models.CharField(max_length=50)),
                ('likes', models.ManyToManyField(blank=True, related_name='likes', to=settings.AUTH_USER_MODEL)),
                ('profile', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(blank=True, upload_to='avatar/')),
                ('bio', models.TextField()),
                ('first_name', models.CharField(max_length=30, null=True)),
                ('last_name', models.CharField(max_length=30, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='WelcomeEmailRecipients',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        # Comment FKs added after Image exists.
        migrations.AddField(
            model_name='comment',
            name='image',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insta_clone.Image'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 0 | 2,593 | 23 |
8112992985872d379d5892603b35dd56bc7c0ac4 | 2,836 | py | Python | cyber-security/components/splunk-query/cli.py | uk-gov-mirror/alphagov.tech-ops | abfcce8f2721e2203bb3c049abd9609f677e9a35 | [
"MIT"
] | 9 | 2019-01-05T19:30:20.000Z | 2021-06-14T08:11:20.000Z | cyber-security/components/splunk-query/cli.py | uk-gov-mirror/alphagov.tech-ops | abfcce8f2721e2203bb3c049abd9609f677e9a35 | [
"MIT"
] | 59 | 2019-01-07T17:10:42.000Z | 2022-01-11T11:28:03.000Z | cyber-security/components/splunk-query/cli.py | uk-gov-mirror/alphagov.tech-ops | abfcce8f2721e2203bb3c049abd9609f677e9a35 | [
"MIT"
] | 5 | 2019-08-29T14:00:04.000Z | 2021-11-25T13:48:24.000Z | #!/usr/bin/env python3
"""
Command line interface for the testing Splunk data ingestion.
"""
import json
import os
import sys
from datetime import datetime
from time import sleep
import click
from search import poll_splunk
@click.group()
@cli.command()
@click.option("-S", "--sleeptime", type=int, default=1)
@click.option("-t", "--timeout", type=int, default=600)
@click.option("-s", "--search", type=str)
@click.option("-u", "--username", type=str, default="")
@click.option("-p", "--password", type=str, default="")
@click.option("-h", "--hostname", type=str, default="")
@click.option("-b", "--port", type=str, default="8089")
@click.option("-o", "--outputlogs", type=bool, default=False)
@click.option("-m", "--match", type=str, default="")
@click.option("-d", "--debug", type=bool, default=False)
if __name__ == "__main__":
cli()
| 26.754717 | 87 | 0.586742 | #!/usr/bin/env python3
"""
Command line interface for the testing Splunk data ingestion.
"""
import json
import os
import sys
from datetime import datetime
from time import sleep
import click
from search import poll_splunk
# Root Click command group; subcommands (e.g. `poll`) register on it via
# @cli.command(). Deliberately documented with a comment, not a docstring,
# so the CLI's --help output stays unchanged.
@click.group()
def cli():
    pass
# Repeatedly run a Splunk search until it returns results (optionally
# containing `match`), or until `timeout` seconds elapse. Exits 0 on
# success, 1 on timeout.
@cli.command()
@click.option("-S", "--sleeptime", type=int, default=1)
@click.option("-t", "--timeout", type=int, default=600)
@click.option("-s", "--search", type=str)
@click.option("-u", "--username", type=str, default="")
@click.option("-p", "--password", type=str, default="")
@click.option("-h", "--hostname", type=str, default="")
@click.option("-b", "--port", type=str, default="8089")
@click.option("-o", "--outputlogs", type=bool, default=False)
@click.option("-m", "--match", type=str, default="")
@click.option("-d", "--debug", type=bool, default=False)
def poll(
    sleeptime: int,
    timeout: int,
    search: str,
    username: str,
    password: str,
    hostname: str,
    port: str,
    outputlogs: bool,
    match: str,
    debug: bool,
):
    # Export explicitly supplied connection settings so the `search` module
    # (poll_splunk) can read them from the environment; empty values leave
    # any pre-existing environment configuration untouched.
    if username != "":
        os.environ["SPLUNK_USERNAME"] = username
    if password != "":
        os.environ["SPLUNK_PASSWORD"] = password
    if hostname != "":
        os.environ["SPLUNK_HOST"] = hostname
    if port != "":
        os.environ["SPLUNK_PORT"] = port
    start_timestamp = datetime.now().timestamp()
    duration = 0.0
    if debug:
        print("Polling Splunk to find our logs...")
    # Poll until the deadline; each iteration runs the search once.
    while duration < timeout:
        duration = datetime.now().timestamp() - start_timestamp
        if debug:
            print(f"Current duration: {duration:.2f} seconds")
            print(f"Timeout: {timeout} seconds")
        start_of_search = datetime.now().timestamp()
        splunk_results = poll_splunk(search)
        end_of_search = datetime.now().timestamp()
        diff = end_of_search - start_of_search
        if debug:
            print(f"Took {diff:.2f} seconds to query Splunk")
        # Success = any result when no match string given, otherwise at
        # least one result whose string form contains `match`.
        success = False
        len_splunk_results = len(splunk_results)
        if match == "":
            if len_splunk_results != 0:
                success = True
        else:
            for log in splunk_results:
                if match in str(log):
                    success = True
                    break
        if success:
            if outputlogs:
                print(json.dumps(splunk_results, indent=4, sort_keys=True))
            total_time = duration + diff
            print(f"✓ Found {len_splunk_results} log(s) in {total_time:.2f} seconds")
            sys.exit(0)
        if debug:
            print(f"No results\nSleeping for {sleeptime} seconds...")
            print("-" * 20)
        sleep(sleeptime)
    # Deadline reached without a successful search.
    print(
        f"❌ TIMEOUT didn't find any logs after {duration:.2f} seconds", file=sys.stderr
    )
    sys.exit(1)
if __name__ == "__main__":
cli()
| 1,947 | 0 | 44 |
7185b03d4729d42f596162c701c87667d17292bc | 1,421 | py | Python | kernel/drivers/drivers/bltouch/command_getter.py | DeVinci-Innovation-Center/Alfred | 0310481b19304a036191ec58a3e38631956aad7d | [
"MIT"
] | null | null | null | kernel/drivers/drivers/bltouch/command_getter.py | DeVinci-Innovation-Center/Alfred | 0310481b19304a036191ec58a3e38631956aad7d | [
"MIT"
] | null | null | null | kernel/drivers/drivers/bltouch/command_getter.py | DeVinci-Innovation-Center/Alfred | 0310481b19304a036191ec58a3e38631956aad7d | [
"MIT"
] | null | null | null | """Get commands from Redis, treat them, and send them to the device."""
import json
import time
from typing import Any
from redis import Redis
from redis.client import PubSub
from bltouch import sensor
class CommandGetter:
"""Gets commands over Redis."""
redis_instance: Redis
pubsub: PubSub
channel: str
def get_command(self) -> Any:
"""Get command from Redis."""
message = self.pubsub.get_message(ignore_subscribe_messages=True)
if message:
# do something with the message
print(message)
command = message["data"].decode("utf-8")
return command
def execute_command(self, command: Any):
"""Send command to device."""
command_dict: dict = json.loads(command)
if command_dict.get("function") == "activate-bltouch":
self.blt.send_command()
def loop(self):
"""Get and produce data indefinitely."""
while True:
command = self.get_command()
if command:
self.execute_command(command)
time.sleep(0.001) # be nice to the system :)
| 26.811321 | 83 | 0.627727 | """Get commands from Redis, treat them, and send them to the device."""
import json
import time
from typing import Any
from redis import Redis
from redis.client import PubSub
from bltouch import sensor
class CommandGetter:
    """Listens on a Redis pub/sub channel and dispatches commands to the device."""

    redis_instance: Redis
    pubsub: PubSub
    channel: str

    def __init__(self, redis_instance: Redis, channel: str, sensor:sensor.BLTouch):
        """Subscribe to *channel* on the given Redis connection and keep the sensor handle."""
        self.redis_instance = redis_instance
        self.pubsub = self.redis_instance.pubsub()
        self.channel = channel
        self.blt = sensor
        self.pubsub.subscribe(self.channel)

    def get_command(self) -> Any:
        """Return the next pending command string, or None when nothing is queued."""
        message = self.pubsub.get_message(ignore_subscribe_messages=True)
        if not message:
            return None
        print(message)
        return message["data"].decode("utf-8")

    def execute_command(self, command: Any):
        """Decode *command* (a JSON string) and trigger the matching device action."""
        payload: dict = json.loads(command)
        if payload.get("function") == "activate-bltouch":
            self.blt.send_command()

    def loop(self):
        """Poll for commands forever, executing each one as it arrives."""
        while True:
            pending = self.get_command()
            if pending:
                self.execute_command(pending)
            time.sleep(0.001)  # tiny sleep so the poll loop yields the CPU
| 254 | 0 | 27 |
cb6517bd17920a0887334692cd0dda20f7c95fc9 | 2,047 | py | Python | utils/misc.py | pocket-sunflower/palyanytsya | a1055d7fa133f0ac2a41505e503b167437bd4079 | [
"Unlicense"
] | 10 | 2022-03-09T21:11:14.000Z | 2022-03-22T16:37:33.000Z | utils/misc.py | pocket-sunflower/palyanytsya | a1055d7fa133f0ac2a41505e503b167437bd4079 | [
"Unlicense"
] | 1 | 2022-03-11T15:08:00.000Z | 2022-03-23T04:57:15.000Z | utils/misc.py | pocket-sunflower/palyanytsya | a1055d7fa133f0ac2a41505e503b167437bd4079 | [
"Unlicense"
] | null | null | null | import math
import os
import socket
import sys
import time
from humanfriendly.terminal import ansi_wrap
from requests import get
from MHDDoS.methods.tools import Tools
from utils.network import NetworkUtils, IPGeolocationData
| 30.102941 | 112 | 0.651197 | import math
import os
import socket
import sys
import time
from humanfriendly.terminal import ansi_wrap
from requests import get
from MHDDoS.methods.tools import Tools
from utils.network import NetworkUtils, IPGeolocationData
class TimeInterval:
interval: float
_last_interval_timestamp: float
def __init__(self, interval: float):
self.interval = interval
self.reset()
def check_if_has_passed(self) -> bool:
time_since_last = time.perf_counter() - self._last_interval_timestamp
if time_since_last >= self.interval:
self._last_interval_timestamp = time.perf_counter()
return True
else:
return False
def reset(self) -> None:
self._last_interval_timestamp = float("-inf")
@property
def time_left(self) -> float:
time_since_last = time.perf_counter() - self._last_interval_timestamp
return max(0., self.interval - time_since_last)
def supports_complex_colors():
platform = sys.platform
supported_platform = platform != 'Pocket PC' and (platform != 'win32' or 'ANSICON' in os.environ)
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
return supported_platform and is_a_tty
def print_vpn_warning():
WARNING_YELLOW = (236, 232, 26) if supports_complex_colors() else "yellow"
ip_data = IPGeolocationData.get_for_my_ip(timeout=5)
print(ansi_wrap("!!! WARNING:\n"
f" Please, MAKE SURE that you are using VPN.\n"
f" Your current data is:\n"
f" IP: {ip_data.ip}\n"
f" Country: {str.upper(ip_data.country)}\n"
f" If the data above doesn't match your physical location, you can ignore this warning.\n"
f" Stay safe! ♥\n", color=WARNING_YELLOW))
def is_valid_ipv4(address: str):
try:
socket.inet_aton(address)
return True
except socket.error:
return False
| 1,549 | 176 | 92 |
3078078d09f939d13d4533524cee3277e748252a | 5,642 | py | Python | dictionaries/scel2txt.py | imcda/ezrhy.me | c903733625821e6719d07eef7fd098e765c82a55 | [
"MIT"
] | 13 | 2019-12-23T04:09:32.000Z | 2021-06-27T04:55:09.000Z | dictionaries/scel2txt.py | imcda/ezrhy.me | c903733625821e6719d07eef7fd098e765c82a55 | [
"MIT"
] | 1 | 2017-08-01T09:43:05.000Z | 2018-12-29T05:03:07.000Z | dictionaries/scel2txt.py | DavidFnck/ezrhy.me | c903733625821e6719d07eef7fd098e765c82a55 | [
"MIT"
] | 8 | 2017-08-10T14:18:53.000Z | 2019-07-07T02:56:13.000Z | # -*- coding: utf-8 -*-
# -*- encoding:utf-8 -*-
"""
credit from
@arthor: zhongxinwang
@date: 2016-11-11
"""
import binascii
import struct
import sys
import os
import pdb
if __name__ == '__main__':
# 将要转换的词库添加在这里就可以了
path = []
full_list = ['167', '1', '76', '96', '127', '436', '154', '389', '367', '31']
sys_in = sys.argv[1]
if sys_in == 'all':
path = full_list
elif sys_in not in full_list:
print 'not a valid file path'
print full_list
sys.exit(0)
else:
path = [sys_in]
scel2txt = Scel2Txt()
cur_path = os.getcwd()
for sub_path in path:
category_path = os.path.join(cur_path, sub_path)
dir_list = os.listdir(category_path)
for _file in dir_list:
tmp_path = os.path.join(category_path, _file)
scel2txt.deal(_file, tmp_path)
# 保存结果
result = map(lambda x: unicode(x).encode("utf8"), scel2txt.GTable)
with open(tmp_path.replace(".scel", ".txt"), "w") as fout:
fout.write("\n".join(result)) | 30.010638 | 87 | 0.494505 | # -*- coding: utf-8 -*-
# -*- encoding:utf-8 -*-
"""
credit from
@arthor: zhongxinwang
@date: 2016-11-11
"""
import binascii
import struct
import sys
import os
import pdb
class Scel2Txt(object):
# 搜狗的scel词库就是保存的文本的unicode编码,每两个字节一个字符(中文汉字或者英文字母)
# 找出其每部分的偏移位置即可
# 主要两部分
# 1.全局拼音表,貌似是所有的拼音组合,字典序
# 格式为(index,len,pinyin)的列表
# index: 两个字节的整数 代表这个拼音的索引
# len: 两个字节的整数 拼音的字节长度
# pinyin: 当前的拼音,每个字符两个字节,总长len
#
# 2.汉语词组表
# 格式为(same,py_table_len,py_table,{word_len,word,ext_len,ext})的一个列表
# same: 两个字节 整数 同音词数量
# py_table_len: 两个字节 整数
# py_table: 整数列表,每个整数两个字节,每个整数代表一个拼音的索引
#
# word_len:两个字节 整数 代表中文词组字节数长度
# word: 中文词组,每个中文汉字两个字节,总长度word_len
# ext_len: 两个字节 整数 代表扩展信息的长度,好像都是10
# ext: 扩展信息 前两个字节是一个整数(不知道是不是词频) 后八个字节全是0
#
# {word_len,word,ext_len,ext} 一共重复same次 同音词 相同拼音表
def __init__(self):
# 拼音表偏移,
self.startPy = 0x1540;
# 汉语词组表偏移
self.startChinese = 0x2628;
# 全局拼音表
self.GPy_Table = {}
# 解析结果
# 元组(词频,拼音,中文词组)的列表
self.GTable = []
def byte2str(self, data):
'''将原始字节码转为字符串'''
i = 0;
length = len(data)
ret = u''
while i < length:
x = data[i] + data[i + 1]
t = unichr(struct.unpack('H', x)[0])
if t == u'\r':
ret += u'\n'
elif t != u' ':
ret += t
i += 2
return ret
def getPyTable(self, data):
# 获取拼音表
if data[0:4] != "\x9D\x01\x00\x00":
return None
data = data[4:]
pos = 0
length = len(data)
while pos < length:
index = struct.unpack('H', data[pos] + data[pos + 1])[0]
# print index,
pos += 2
l = struct.unpack('H', data[pos] + data[pos + 1])[0]
# print l,
pos += 2
py = self.byte2str(data[pos:pos + l])
# print py
self.GPy_Table[index] = py
pos += l
def getWordPy(self, data):
# 获取一个词组的拼音
pos = 0
length = len(data)
ret = u''
while pos < length:
index = struct.unpack('H', data[pos] + data[pos + 1])[0]
ret += self.GPy_Table[index]
pos += 2
return ret
def getWord(self, data):
# 获取一个词组
pos = 0
length = len(data)
ret = u''
while pos < length:
index = struct.unpack('H', data[pos] + data[pos + 1])[0]
ret += GPy_Table[index]
pos += 2
return ret
def getChinese(self, data):
# 读取中文表
# pdb.set_trace()
pos = 0
length = len(data)
while pos < length:
# 同音词数量
same = struct.unpack('H', data[pos] + data[pos + 1])[0]
# print '[same]:',same,
# 拼音索引表长度
pos += 2
py_table_len = struct.unpack('H', data[pos] + data[pos + 1])[0]
# 拼音索引表
pos += 2
py = self.getWordPy(data[pos: pos + py_table_len])
# 中文词组
pos += py_table_len
for i in xrange(same):
# 中文词组长度
c_len = struct.unpack('H', data[pos] + data[pos + 1])[0]
# 中文词组
pos += 2
word = self.byte2str(data[pos: pos + c_len])
# 扩展数据长度
pos += c_len
ext_len = struct.unpack('H', data[pos] + data[pos + 1])[0]
# 词频
pos += 2
count = struct.unpack('H', data[pos] + data[pos + 1])[0]
# 保存
# self.GTable.append((count, py, word))
self.GTable.append((word))
# 到下个词的偏移位置
pos += ext_len
def deal(self, file_name, file_path):
self.GTable = []
print 'generating...'+file_name.replace(".scel", ".txt")
with open(file_path, 'rb') as fin:
data = fin.read()
if data[0:12] != "\x40\x15\x00\x00\x44\x43\x53\x01\x01\x00\x00\x00":
print "确认你选择的是搜狗(.scel)词库?"
sys.exit(0)
# pdb.set_trace()
# print "词库名:" ,byte2str(data[0x130:0x338]).encode("utf8")#.encode('GB18030')
# print "词库类型:" ,byte2str(data[0x338:0x540]).encode("utf8")#.encode('GB18030')
# print "描述信息:" ,byte2str(data[0x540:0xd40]).encode("utf8")#.encode('GB18030')
# print "词库示例:",byte2str(data[0xd40:startPy]).encode("utf8")#.encode('GB18030')
self.getPyTable(data[self.startPy:self.startChinese])
self.getChinese(data[self.startChinese:])
if __name__ == '__main__':
    # Lexicon category folders to convert; extend full_list to add more.
    path = []
    full_list = ['167', '1', '76', '96', '127', '436', '154', '389', '367', '31']
    # First CLI argument selects one folder from full_list, or 'all'.
    sys_in = sys.argv[1]
    if sys_in == 'all':
        path = full_list
    elif sys_in not in full_list:
        print 'not a valid file path'
        print full_list
        sys.exit(0)
    else:
        path = [sys_in]
    scel2txt = Scel2Txt()
    cur_path = os.getcwd()
    # Convert every file in each selected folder; deal() resets GTable per
    # file, so each output .txt holds only that file's phrases.
    for sub_path in path:
        category_path = os.path.join(cur_path, sub_path)
        dir_list = os.listdir(category_path)
        for _file in dir_list:
            tmp_path = os.path.join(category_path, _file)
            scel2txt.deal(_file, tmp_path)
            # Save the result next to the source, with a .txt extension.
            result = map(lambda x: unicode(x).encode("utf8"), scel2txt.GTable)
            with open(tmp_path.replace(".scel", ".txt"), "w") as fout:
fout.write("\n".join(result)) | 3,550 | 1,859 | 23 |
e90bd54ba2b84ce23e91bb2abcd4d5af1ff217fc | 364 | py | Python | conf/decorators.py | gureuso/turnthepage | f86f4e6e80e4a817b06cc5c777d733cf8171310e | [
"Apache-2.0"
] | 1 | 2019-04-27T13:36:26.000Z | 2019-04-27T13:36:26.000Z | conf/decorators.py | gureuso/turnthepage | f86f4e6e80e4a817b06cc5c777d733cf8171310e | [
"Apache-2.0"
] | 7 | 2020-06-05T20:21:29.000Z | 2022-03-11T23:44:41.000Z | conf/decorators.py | gureuso/turnthepage | f86f4e6e80e4a817b06cc5c777d733cf8171310e | [
"Apache-2.0"
] | null | null | null | from functools import wraps
from django.conf import settings
from django.shortcuts import redirect
| 28 | 56 | 0.725275 | from functools import wraps
from django.conf import settings
from django.shortcuts import redirect
def already_logged_in(func):
    """View decorator: bounce authenticated users to LOGIN_REDIRECT_URL.

    Wraps *func* so that a request from a logged-in user never reaches the
    view (presumably intended for login/signup pages); anonymous requests
    pass straight through to the wrapped view.
    """
    @wraps(func)
    def wrapper_func(request, *args, **kwargs):
        if request.user.is_authenticated:
            return redirect(settings.LOGIN_REDIRECT_URL)
        return func(request, *args, **kwargs)
    return wrapper_func
| 241 | 0 | 23 |
7482aae1d07babedc70ce21f5a5bacf0fe4f2b1e | 243 | py | Python | src/test/examples/custom_i2c.py | spaceconcordia/csdc3 | 86d70a3eb677f23f6b38346f335b8d88d027f612 | [
"MIT"
] | null | null | null | src/test/examples/custom_i2c.py | spaceconcordia/csdc3 | 86d70a3eb677f23f6b38346f335b8d88d027f612 | [
"MIT"
] | null | null | null | src/test/examples/custom_i2c.py | spaceconcordia/csdc3 | 86d70a3eb677f23f6b38346f335b8d88d027f612 | [
"MIT"
] | null | null | null | import smbus
I2C_ADDRESS = 0x68
bus = smbus.SMBus(0)
# Set all ports in input mode
bus.write_byte(I2C_ADDRESS, 0xFF)
# Read all the input lines
high = bus.read_byte(0x1b)
low = bus.read_byte(0x1c)
value = (high << 8) + low
print value
| 13.5 | 33 | 0.707819 | import smbus
# Device address on I2C bus 0.
I2C_ADDRESS = 0x68
bus = smbus.SMBus(0)
# Set all ports in input mode
bus.write_byte(I2C_ADDRESS, 0xFF)
# Read all the input lines
# NOTE(review): the reads target addresses 0x1b and 0x1c rather than
# I2C_ADDRESS (0x68) -- confirm these are the intended devices/registers.
high = bus.read_byte(0x1b)
low = bus.read_byte(0x1c)
# Combine the two bytes into one 16-bit value (high byte first).
value = (high << 8) + low
print value
| 0 | 0 | 0 |
9b54da3fecd5828075c9d6ed8fe653a72a4bcdca | 1,156 | py | Python | Robot/run_motor.py | ankithu/RaspberryPi-Robot | cd8224af8b85b3999c79999aa39941c2d52f1a61 | [
"MIT"
] | null | null | null | Robot/run_motor.py | ankithu/RaspberryPi-Robot | cd8224af8b85b3999c79999aa39941c2d52f1a61 | [
"MIT"
] | null | null | null | Robot/run_motor.py | ankithu/RaspberryPi-Robot | cd8224af8b85b3999c79999aa39941c2d52f1a61 | [
"MIT"
] | null | null | null | import time
from SunFounder_TB6612 import TB6612
import RPi.GPIO as GPIO
import time
from SunFounder_PCA9685 import Servo
print "********************************************"
print "* *"
print "* SunFounder TB6612 *"
print "* *"
print "* Connect MA to BCM17 *"
print "* Connect MB to BCM18 *"
print "* Connect PWMA to BCM27 *"
print "* Connect PWMB to BCM22 *"
print "* *"
print "********************************************"
a = Servo.Servo(4)
b = Servo.Servo(5)
Servo.Servo(4).setup()
Servo.Servo(5).setup()
#GPIO.setmode(GPIO.BCM)
#GPIO.setup((27, 22), GPIO.OUT)
#a = GPIO.PWM(27, 60)
#b = GPIO.PWM(22, 60)
#a.start(0)
#b.start(0))
motorB = TB6612.Motor(17)
motorA = TB6612.Motor(18)
motorA.debug = True
motorB.debug = True
motorA.pwm = a_speed
motorB.pwm = b_speed
delay = 0.05
motorA.forward()
motorA.seped = 100
motorB.forward()
motorB.speed = 100
| 25.688889 | 52 | 0.506055 | import time
from SunFounder_TB6612 import TB6612
import RPi.GPIO as GPIO
import time
from SunFounder_PCA9685 import Servo
print "********************************************"
print "* *"
print "* SunFounder TB6612 *"
print "* *"
print "* Connect MA to BCM17 *"
print "* Connect MB to BCM18 *"
print "* Connect PWMA to BCM27 *"
print "* Connect PWMB to BCM22 *"
print "* *"
print "********************************************"
a = Servo.Servo(4)
b = Servo.Servo(5)
Servo.Servo(4).setup()
Servo.Servo(5).setup()
#GPIO.setmode(GPIO.BCM)
#GPIO.setup((27, 22), GPIO.OUT)
#a = GPIO.PWM(27, 60)
#b = GPIO.PWM(22, 60)
#a.start(0)
#b.start(0))
def a_speed(value):
    # PWM callback for motor A: forwards the duty value to servo channel 4 (`a`).
    a.write(value)
def b_speed(value):
    # PWM callback for motor B: forwards the duty value to servo channel 5 (`b`).
    b.write(value)
# Motor objects: direction control on BCM17 (motor B) and BCM18 (motor A);
# speed is driven through the PCA9685 servo channels via a_speed/b_speed.
motorB = TB6612.Motor(17)
motorA = TB6612.Motor(18)
motorA.debug = True
motorB.debug = True
motorA.pwm = a_speed
motorB.pwm = b_speed
delay = 0.05
# Drive both motors forward at full speed.
motorA.forward()
motorA.speed = 100  # bug fix: was `motorA.seped`, a typo that never set the speed
motorB.forward()
motorB.speed = 100
| 28 | 0 | 46 |
b3a869f8d1f274e023e442902249fb4bf6a39819 | 3,503 | py | Python | cloudrail/knowledge/rules/aws/non_context_aware/ensure_no_read_only_access_policy_used_by_role_user_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | cloudrail/knowledge/rules/aws/non_context_aware/ensure_no_read_only_access_policy_used_by_role_user_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | cloudrail/knowledge/rules/aws/non_context_aware/ensure_no_read_only_access_policy_used_by_role_user_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | from typing import Dict, List
from cloudrail.knowledge.context.aws.iam.iam_group import IamGroup
from cloudrail.knowledge.context.aws.iam.iam_identity import IamIdentity
from cloudrail.knowledge.context.aws.iam.iam_user import IamUser
from cloudrail.knowledge.context.aws.iam.iam_users_login_profile import IamUsersLoginProfile
from cloudrail.knowledge.context.aws.iam.role import Role
from cloudrail.knowledge.context.aws.aws_resource import AwsResource
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
| 52.283582 | 150 | 0.695404 | from typing import Dict, List
from cloudrail.knowledge.context.aws.iam.iam_group import IamGroup
from cloudrail.knowledge.context.aws.iam.iam_identity import IamIdentity
from cloudrail.knowledge.context.aws.iam.iam_user import IamUser
from cloudrail.knowledge.context.aws.iam.iam_users_login_profile import IamUsersLoginProfile
from cloudrail.knowledge.context.aws.iam.role import Role
from cloudrail.knowledge.context.aws.aws_resource import AwsResource
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
class EnsureNoReadOnlyAccessPolicyUsedByRoleUserRule(AwsBaseRule):
    """Flag IAM roles and console users that end up with the AWS managed
    ``ReadOnlyAccess`` policy, either attached directly or inherited via a group.
    """
    def get_id(self) -> str:
        """Return this rule's unique cloudrail identifier."""
        return 'non_car_iam_readonlyaccess_policy'
    def execute(self, env_context: AwsEnvironmentContext, parameters: Dict[ParameterType, any]) -> List[Issue]:
        """Build one :class:`Issue` per violating IAM entity in *env_context*.
        ``parameters`` is part of the base-rule signature and is unused here.
        """
        issues: List[Issue] = []
        issue_items = self._get_iam_entities_issues(env_context.roles, env_context.users, env_context.users_login_profile)
        for item in issue_items:
            # A user that does NOT carry the policy directly must have inherited
            # it through one of its groups -- report the first violating group.
            if isinstance(item, IamUser) and not self._is_read_only_policy(item):
                violating_groups: List[IamGroup] = [group for group in item.groups if self._is_read_only_policy(group)]
                issues.append(
                    Issue(
                        f'The {item.get_type()} `{item.get_friendly_name()}` inherit ReadOnlyAccess policy, '
                        f'via group(s) `{", ".join([group.get_friendly_name() for group in violating_groups])}` potentially'
                        f' risking contents in its AWS account',
                        violating_groups[0], violating_groups[0]))
            else:
                # Roles, and users with the policy attached directly.
                issues.append(
                    Issue(
                        f'The {item.get_type()} `{item.get_friendly_name()}` is assigned ReadOnlyAccess policy, '
                        f'potentially risking contents in its AWS account', item, item))
        return issues
    def _get_iam_entities_issues(self, roles: List[Role], users: List[IamUser], users_login_profile: List[IamUsersLoginProfile]) -> List[AwsResource]:
        """Collect the violating entities:
        roles carrying ReadOnlyAccess whose trust policy allows external
        assume, plus console users (i.e. users with a login profile) that
        carry it directly or through a group. Duplicates are filtered out.
        """
        users_login_list = [user_name.name for user_name in users_login_profile]
        issues_list = []
        for role in roles:
            if self._is_read_only_policy(role) and role.assume_role_policy.is_allowing_external_assume:
                if role not in issues_list:
                    issues_list.append(role)
        for user in users:
            if user.name in users_login_list:
                if self._is_user_or_group_has_read_only_policy(user):
                    if user not in issues_list:
                        issues_list.append(user)
        return issues_list
    @staticmethod
    def _is_read_only_policy(item: IamIdentity) -> bool:
        """True when *item* has a policy literally named 'ReadOnlyAccess'."""
        return any(policy.policy_name == 'ReadOnlyAccess' for policy in item.permissions_policies)
    def _is_user_or_group_has_read_only_policy(self, user: IamUser) -> bool:
        """Truthy when *user* carries ReadOnlyAccess directly or via a group.
        NOTE(review): despite the ``bool`` annotation this actually returns the
        (possibly empty) *list* of affected groups when the direct check is
        False; callers only use the result in a boolean context.
        """
        affected_groups = []
        for group in user.groups:
            if self._is_read_only_policy(group):
                affected_groups.append(group)
        return self._is_read_only_policy(user) or affected_groups
    def should_run_rule(self, environment_context: AwsEnvironmentContext) -> bool:
        """Run only when the environment actually contains IAM entities."""
        return bool(environment_context.get_all_iam_entities())
| 2,502 | 225 | 23 |
eb070ce3fe05f70b88827a09d9c0e1db6c5d01a8 | 1,701 | py | Python | dpkt/dpkt-send_arp_request.py | all3g/pieces | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 34 | 2016-10-31T02:05:24.000Z | 2018-11-08T14:33:13.000Z | dpkt/dpkt-send_arp_request.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 2 | 2017-05-11T03:00:31.000Z | 2017-11-01T23:37:37.000Z | dpkt/dpkt-send_arp_request.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 21 | 2016-08-19T09:05:45.000Z | 2018-11-08T14:33:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import dpkt
import socket
import binascii
def macaddr_aton(mac_addr):
    """Convert a colon-separated MAC address string into its six raw bytes."""
    hex_digits = mac_addr.replace(':', '')
    return binascii.unhexlify(hex_digits)
def build_arp_packet(src_macaddr, dst_macaddr, src_ip, dst_ip):
    """ forge arp packets used to poison and reset target connection """
    # Ethernet frame that will carry the ARP payload.
    packet = dpkt.ethernet.Ethernet()
    arp = dpkt.arp.ARP()
    if not src_ip:
        raise Exception("src ip not found")
    if not dst_ip:
        raise Exception("dst ip not found")
    arp.sha = macaddr_aton(src_macaddr) # source mac address
    arp.tha = macaddr_aton(dst_macaddr) # destination mac address
    # NOTE(review): spa/tpa receive dst_ip/src_ip -- the opposite of what the
    # inline comments claim (ARP 'spa' is the *sender* protocol address,
    # 'tpa' the *target*, per RFC 826).  For a plain ARP request this looks
    # swapped; for the cache-poisoning use hinted at in the docstring,
    # announcing dst_ip under our MAC may be intentional -- confirm before
    # changing.
    arp.spa = socket.inet_aton(dst_ip) # source ip address
    arp.tpa = socket.inet_aton(src_ip) # destination ip address
    arp.op = dpkt.arp.ARP_OP_REQUEST # ARP Request
    packet.src = macaddr_aton(src_macaddr)
    packet.dst = macaddr_aton('ff:ff:ff:ff:ff:ff') # broadcast address
    packet.type = dpkt.ethernet.ETH_TYPE_ARP
    packet.data = arp
    return packet
def send_arp_packet(device, src_macaddr, dst_macaddr, src_ip, dst_ip):
    """send arp request.
    """
    # Raw link-layer socket on the given interface (requires root).
    # NOTE(review): socket.SOCK_RAW is reused here as the protocol argument
    # and as the bind "port"; conventionally an ethertype such as
    # socket.htons(0x0806) is passed instead -- confirm this works on the
    # target kernel.
    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.SOCK_RAW)
    s.bind((device, socket.SOCK_RAW))
    packet = build_arp_packet(src_macaddr, dst_macaddr, src_ip, dst_ip)
    # str(packet) presumably serializes the dpkt frame (Python 2 idiom;
    # bytes(packet) would be needed on Python 3).
    s.send(str(packet))
    s.close()
if __name__ == '__main__':
    # Example invocation: broadcast an ARP frame on eth0 about the .1
    # address (presumably the gateway -- adjust for your network).
    device = 'eth0'
    src_macaddr = "00:50:56:35:5b:aa"
    dst_macaddr = "00:00:00:00:00:00"  # all-zero target MAC, typical for requests
    src_ip = "192.168.53.156"
    dst_ip = "192.168.53.1"
send_arp_packet(device, src_macaddr, dst_macaddr, src_ip, dst_ip) | 29.327586 | 77 | 0.657848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import dpkt
import socket
import binascii
def macaddr_aton(mac_addr):
"""translate mac addr into network bits"""
return binascii.unhexlify(mac_addr.replace(':', ''))
def build_arp_packet(src_macaddr, dst_macaddr, src_ip, dst_ip):
""" forge arp packets used to poison and reset target connection """
packet = dpkt.ethernet.Ethernet()
arp = dpkt.arp.ARP()
if not src_ip:
raise Exception("src ip not found")
if not dst_ip:
raise Exception("dst ip not found")
arp.sha = macaddr_aton(src_macaddr) # source mac address
arp.tha = macaddr_aton(dst_macaddr) # destination mac address
arp.spa = socket.inet_aton(dst_ip) # source ip address
arp.tpa = socket.inet_aton(src_ip) # destination ip address
arp.op = dpkt.arp.ARP_OP_REQUEST # ARP Request
packet.src = macaddr_aton(src_macaddr)
packet.dst = macaddr_aton('ff:ff:ff:ff:ff:ff') # broadcast address
packet.type = dpkt.ethernet.ETH_TYPE_ARP
packet.data = arp
return packet
def send_arp_packet(device, src_macaddr, dst_macaddr, src_ip, dst_ip):
"""send arp request.
"""
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.SOCK_RAW)
s.bind((device, socket.SOCK_RAW))
packet = build_arp_packet(src_macaddr, dst_macaddr, src_ip, dst_ip)
s.send(str(packet))
s.close()
if __name__ == '__main__':
device = 'eth0'
src_macaddr = "00:50:56:35:5b:aa"
dst_macaddr = "00:00:00:00:00:00"
src_ip = "192.168.53.156"
dst_ip = "192.168.53.1"
send_arp_packet(device, src_macaddr, dst_macaddr, src_ip, dst_ip) | 0 | 0 | 0 |
ff39544e472e7032f1a8461696edb862bf59fb4a | 2,627 | py | Python | create_tiny_dataset.py | MelonDLI/ATSPrivacy | 2cf4bd67c9c0c69092b63dcdc3d06b33acf32812 | [
"MIT"
] | 14 | 2021-05-31T12:32:38.000Z | 2022-01-11T12:50:27.000Z | create_tiny_dataset.py | MelonDLI/ATSPrivacy | 2cf4bd67c9c0c69092b63dcdc3d06b33acf32812 | [
"MIT"
] | 1 | 2021-06-29T08:57:01.000Z | 2021-09-07T13:13:25.000Z | create_tiny_dataset.py | MelonDLI/ATSPrivacy | 2cf4bd67c9c0c69092b63dcdc3d06b33acf32812 | [
"MIT"
] | 3 | 2021-09-16T14:39:55.000Z | 2022-01-11T13:47:12.000Z | import os, sys
import torch
import torchvision
seed=23333
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
import random
random.seed(seed)
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from PIL import Image
import inversefed
import torchvision.transforms as transforms
import argparse
from autoaugment import SubPolicy
from inversefed.data.data_processing import _build_cifar100, _get_meanstd
import torch.nn.functional as F
from benchmark.comm import create_model, build_transform, preprocess, create_config
from torch.utils.data import SubsetRandomSampler
parser = argparse.ArgumentParser(description='Reconstruct some image from a trained model.')
parser.add_argument('--data', default=None, required=True, type=str, help='Vision dataset.')
opt = parser.parse_args()
# init env
setup = inversefed.utils.system_startup()
defs = inversefed.training_strategy('conservative');
if __name__ == '__main__':
main() | 34.565789 | 98 | 0.639893 | import os, sys
import torch
import torchvision
seed=23333
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
import random
random.seed(seed)
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from PIL import Image
import inversefed
import torchvision.transforms as transforms
import argparse
from autoaugment import SubPolicy
from inversefed.data.data_processing import _build_cifar100, _get_meanstd
import torch.nn.functional as F
from benchmark.comm import create_model, build_transform, preprocess, create_config
from torch.utils.data import SubsetRandomSampler
parser = argparse.ArgumentParser(description='Reconstruct some image from a trained model.')
parser.add_argument('--data', default=None, required=True, type=str, help='Vision dataset.')
opt = parser.parse_args()
# init env
setup = inversefed.utils.system_startup()
defs = inversefed.training_strategy('conservative');
def main():
    """Build a DataLoader over a 1000-image FashionMNIST slice.
    NOTE(review): the ``exit(0)`` below terminates the process, so the
    cifar100 branch that follows is dead code.
    """
    # FashionMNIST, upsampled to 3x32x32 to look like a CIFAR-style input.
    trainset = torchvision.datasets.FashionMNIST('../data', train=True, download=True,
                               transform=transforms.Compose([
                                   lambda x: transforms.functional.to_grayscale(x, num_output_channels=3),
                                   transforms.Resize(32),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307,), (0.3081,))
                               ]))
    # Fixed slice of 1000 samples (indices 2000..2999).
    dataset_indices = list(range(len(trainset)))
    dataset_indices = dataset_indices[2000:3000]
    sampler = SubsetRandomSampler(dataset_indices)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=defs.batch_size,
                drop_last=False, num_workers=4, pin_memory=True, sampler=sampler)
    exit(0)
    # ------- everything below is unreachable (see exit(0) above) -------
    if opt.data == 'cifar100':
        downloaded_list = [
            ['train', '16019d7e3df5f24257cddd939b257f8d'],
        ]
        root = os.path.join(os.getenv("HOME"), 'data')
        base_folder = 'cifar-100-python'
        # now load the picked numpy arrays
        data = list()
        targets = list()
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(root, base_folder, file_name)
            with open(file_path, 'rb') as f:
                # NOTE(review): `pickle` is never imported in this file;
                # reviving this branch would raise NameError.
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
            data.append(entry['data'])
            if 'labels' in entry:
                targets.extend(entry['labels'])
            else:
                targets.extend(entry['fine_labels'])
if __name__ == '__main__':
main() | 1,637 | 0 | 23 |
af5bd04716bbf03f114bcbce7bb2e94c0948e9ba | 1,886 | py | Python | Host_DM/run_stan.py | obscode/CSPMCMC | 3811d5196a99f2f0adea2b6d0e01b12d93fd9639 | [
"MIT"
] | 1 | 2019-09-30T13:57:57.000Z | 2019-09-30T13:57:57.000Z | Host_DM/run_stan.py | obscode/CSPMCMC | 3811d5196a99f2f0adea2b6d0e01b12d93fd9639 | [
"MIT"
] | null | null | null | Host_DM/run_stan.py | obscode/CSPMCMC | 3811d5196a99f2f0adea2b6d0e01b12d93fd9639 | [
"MIT"
] | null | null | null | from numpy import*
from astropy.io import ascii
from astropy.table import Table , Column
import pystan
import pickle
import config
import sys , os , string
import generate_STAN
import get_data
# go in /data/Cepheids/runs
# creates a pickle file and a table
# Parse the run configuration given on the command line.
cfg = config.config(sys.argv[1])
if len(sys.argv) > 2:
   codefile = sys.argv[2]
else:
   codefile = None
# PYSTAN MODEL
model = generate_STAN.generate_STAN(cfg, outfile='model.stan', codefile=codefile)
# Data
dat,extras = get_data.get_data(cfg)
# Initial guess for parameters
samplefile = cfg.sampler.sample0
if samplefile is not None:
   # Warm start: perturb the per-parameter medians of a previous run's
   # chains (one init dict per chain).
   import STANstats
   c = STANstats.STANchains(samplefile)
   d0 = generate_STAN.generate_init_dict(cfg, dat, extras['cephlist'])
   d = []
   for i in range(cfg.sampler.chains):
      d.append({})
      for key in d0:
         d[-1][key] = random.normal(c.median(key), c.std(key))
else:
   d = [generate_STAN.generate_init_dict(cfg,dat, extras['cephlist']) \
         for i in range(cfg.sampler.chains)]
if __name__ == "__main__":
   #___________________________________________________________________________
   # FIT
   fit2 = model.sampling(data=dat, iter=cfg.sampler.iter,
          warmup=cfg.sampler.burn, chains=cfg.sampler.chains,
          init=d)
   # Keep a human-readable summary of the sampler output.
   fitres = str(fit2)
   f = open('sampler.out','w')
   f.write(fitres)
   f.close()
   #________________________________________
   #Make pickle file
   filename = getattr(cfg.sampler, 'output', 'traces.pickle')
   samples = fit2.extract(permuted=cfg.sampler.permuted)
   # Now we can add extra data to dat before saving to pickle file.
   for key in extras:
      dat[key] = extras[key]
   if not cfg.sampler.permuted:
      d = dict(data=dat, samples=samples, flatnames=fit2.flatnames)
   else:
      d = samples
      d['data'] = dat
   # NOTE(review): text-mode 'w' only works for pickling on Python 2;
   # Python 3 requires 'wb' here.
   fout = open(filename, 'w')
   pickle.dump(d, fout)
   fout.close()
| 26.942857 | 81 | 0.688229 | from numpy import*
from astropy.io import ascii
from astropy.table import Table , Column
import pystan
import pickle
import config
import sys , os , string
import generate_STAN
import get_data
# go in /data/Cepheids/runs
# creates a pickle file and a table
cfg = config.config(sys.argv[1])
if len(sys.argv) > 2:
codefile = sys.argv[2]
else:
codefile = None
# PYSTAN MODEL
model = generate_STAN.generate_STAN(cfg, outfile='model.stan', codefile=codefile)
# Data
dat,extras = get_data.get_data(cfg)
# Initial guess for parameters
samplefile = cfg.sampler.sample0
if samplefile is not None:
import STANstats
c = STANstats.STANchains(samplefile)
d0 = generate_STAN.generate_init_dict(cfg, dat, extras['cephlist'])
d = []
for i in range(cfg.sampler.chains):
d.append({})
for key in d0:
d[-1][key] = random.normal(c.median(key), c.std(key))
else:
d = [generate_STAN.generate_init_dict(cfg,dat, extras['cephlist']) \
for i in range(cfg.sampler.chains)]
if __name__ == "__main__":
#___________________________________________________________________________
# FIT
fit2 = model.sampling(data=dat, iter=cfg.sampler.iter,
warmup=cfg.sampler.burn, chains=cfg.sampler.chains,
init=d)
fitres = str(fit2)
f = open('sampler.out','w')
f.write(fitres)
f.close()
#________________________________________
#Make pickle file
filename = getattr(cfg.sampler, 'output', 'traces.pickle')
samples = fit2.extract(permuted=cfg.sampler.permuted)
# Now we can add extra data to dat before saving to pickle file.
for key in extras:
dat[key] = extras[key]
if not cfg.sampler.permuted:
d = dict(data=dat, samples=samples, flatnames=fit2.flatnames)
else:
d = samples
d['data'] = dat
fout = open(filename, 'w')
pickle.dump(d, fout)
fout.close()
| 0 | 0 | 0 |
e69456a4a8e658ee5c27f5e6059ce88a43c8a0f5 | 6,036 | py | Python | generate_flu_subtrees_dataset_run.py | neherlab/treetime_validation | c9760194712396ea5f5c33a9215eddbd3d13bfc1 | [
"MIT"
] | 4 | 2019-01-28T06:47:48.000Z | 2021-04-22T16:31:37.000Z | generate_flu_subtrees_dataset_run.py | neherlab/treetime_validation | c9760194712396ea5f5c33a9215eddbd3d13bfc1 | [
"MIT"
] | 1 | 2020-04-03T14:42:11.000Z | 2020-04-03T14:42:11.000Z | generate_flu_subtrees_dataset_run.py | neherlab/treetime_validation | c9760194712396ea5f5c33a9215eddbd3d13bfc1 | [
"MIT"
] | 1 | 2020-03-25T06:58:45.000Z | 2020-03-25T06:58:45.000Z | #!/usr/bin/env python
import treetime
import numpy as np
import os,sys
import datetime
import subprocess
import re
import utility_functions_flu as flu_utils
import utility_functions_general as gen_utils
from utility_functions_beast import run_beast, read_beast_log
aln_name = "./resources/flu_H3N2/H3N2_HA_2011_2013.fasta"
tree_name = "./resources/flu_H3N2/H3N2_HA_2011_2013.nwk"
RUN_TREETIME = True
RUN_LSD = True
RUN_BEAST = True
if __name__ == "__main__":
N_leaves = int(sys.argv[1])
out_dir = sys.argv[2]
subtree_fname_suffix = sys.argv[3]
treetime_res_file = sys.argv[4]
lsd_res_file = sys.argv[5]
beast_res_file = sys.argv[6]
if len(sys.argv) > 7:
lsd_params = sys.argv[7].split("|")
else:
lsd_params = ['-c', '-r', 'a', '-v']
# Sample subtree
subtree_filename, N_leaves = sample_subtree(out_dir, N_leaves, subtree_fname_suffix)
if RUN_TREETIME:
dates = flu_utils.dates_from_flu_tree(tree_name)
myTree = treetime.TreeTime(gtr='Jukes-Cantor',
tree=subtree_filename, aln=aln_name, dates=dates,
debug=False, verbose=4)
myTree.optimize_seq_and_branch_len(reuse_branch_len=True, prune_short=True, max_iter=5, infer_gtr=False)
start = datetime.datetime.now()
myTree.run(root='best', relaxed_clock=False, max_iter=3, resolve_polytomies=True, do_marginal=False)
end = datetime.datetime.now()
if not os.path.exists(treetime_res_file):
try:
with open(treetime_res_file, 'w') as of:
of.write("#Filename,N_leaves,Tmrca,Mu,R^2(initial clock),R^2(internal nodes),Runtime\n")
except:
pass
with open(treetime_res_file, 'a') as of:
of.write("{},{},{},{},{},{},{}\n".format(
subtree_filename,
str(N_leaves),
str(myTree.tree.root.numdate),
str(myTree.date2dist.clock_rate),
str(myTree.date2dist.r_val),
str(gen_utils.internal_regress(myTree)),
str((end-start).total_seconds()) ))
print ("TreeTime done!")
else:
print ("Skip TreeTime run")
if RUN_LSD:
lsd_outdir = os.path.join(out_dir, 'LSD_out')
# run LSD for the subtree:
if not os.path.exists(lsd_outdir):
try:
os.makedirs(lsd_outdir)
except:
pass
lsd_outfile = os.path.join(lsd_outdir, os.path.split(subtree_filename)[-1].replace(".nwk", ".txt"))
datesfile = os.path.join(lsd_outdir, os.path.split(subtree_filename)[-1].replace(".nwk", ".lsd_dates.txt"))
flu_utils.create_LSD_dates_file_from_flu_tree(subtree_filename, datesfile)
runtime = gen_utils.run_LSD(subtree_filename, datesfile, lsd_outfile, lsd_params)
# parse LSD results
tmrca, mu, objective = gen_utils.parse_lsd_output(lsd_outfile)
try:
if float(mu) > 0:
if not os.path.exists(lsd_res_file):
try:
with open(lsd_res_file, 'w') as of:
of.write("#Filename,N_leaves,Tmrca,Mu,Runtime,Objective\n")
except:
pass
with open(lsd_res_file, "a") as of:
of.write(",".join([subtree_filename, str(N_leaves), tmrca, mu, runtime, objective]))
of.write("\n")
except:
pass
print ("LSD Done!")
else:
print ("Skip LSD run")
if RUN_BEAST:
_run_beast(N_leaves, subtree_filename, out_dir, beast_res_file)
| 35.093023 | 115 | 0.608847 | #!/usr/bin/env python
import treetime
import numpy as np
import os,sys
import datetime
import subprocess
import re
import utility_functions_flu as flu_utils
import utility_functions_general as gen_utils
from utility_functions_beast import run_beast, read_beast_log
aln_name = "./resources/flu_H3N2/H3N2_HA_2011_2013.fasta"
tree_name = "./resources/flu_H3N2/H3N2_HA_2011_2013.nwk"
RUN_TREETIME = True
RUN_LSD = True
RUN_BEAST = True
def _run_beast(N_leaves, subtree_filename, out_dir, res_file):
    """Run BEAST on the sampled subtree and append summary stats to *res_file*."""
    def beast_log_post_process(log_file):
        # Summarize the BEAST trace: mean/std of likelihood, root height
        # (Tmrca) and clock rate over the last 50 samples, appended to
        # res_file as one CSV row.
        df = read_beast_log(log_file, np.max(dates.values()))
        if df is None or df.shape[0] < 200:
            print ("Beast log {} is corrupted or BEAST run did not finish".format(log_file))
            return
        inferred_LH = df['likelihood'][-50:].mean()
        inferred_LH_std = df['likelihood'][-50:].std()
        inferred_Tmrca = df['treeModel.rootHeight'][-50:].mean()
        inferred_Tmrca_std = df['treeModel.rootHeight'][-50:].std()
        inferred_Mu = df['clock.rate'][-50:].mean()
        inferred_Mu_std = df['clock.rate'][-50:].std()
        # Write the CSV header once; errors are swallowed (parallel runs).
        if not os.path.exists(res_file):
            try:
                with open(res_file, 'w') as of:
                    of.write("#Filename,N_leaves,LH,LH_std,Tmrca,Tmrca_std,Mu,Mu_std\n")
            except:
                pass
        with open(res_file, 'a') as of:
            of.write("{},{},{},{},{},{},{},{}\n".format(
                subtree_filename,
                N_leaves,
                inferred_LH,
                inferred_LH_std,
                inferred_Tmrca,
                inferred_Tmrca_std,
                inferred_Mu,
                inferred_Mu_std))
    # `dates` is captured by the closure above; it is assigned here, before
    # run_beast() can invoke the callback.
    dates = flu_utils.dates_from_flu_tree(subtree_filename)
    beast_out_dir = os.path.join(out_dir, 'beast_out')
    if not os.path.exists(beast_out_dir):
        try:
            os.makedirs(beast_out_dir)
        except:
            pass
    beast_prefix = os.path.join(beast_out_dir, os.path.split(subtree_filename)[-1][:-4]) # truncate '.nwk'
    run_beast(subtree_filename, aln_name, dates, beast_prefix,
            template_file="./resources/beast/template_bedford_et_al_2015.xml",
            log_post_process=beast_log_post_process)
def sample_subtree(out_dir, N_leaves, subtree_fname_suffix):
    """Sample a subtree (sharing the original root) of roughly *N_leaves*
    leaves and save it under ``<out_dir>/subtrees``.
    Returns ``(newick_path, actual_leaf_count)``.
    """
    subtrees_dir = os.path.join(out_dir, "subtrees")
    if not os.path.exists(subtrees_dir):
        try:
            os.makedirs(subtrees_dir)
        except:
            pass
    subtree_fname_format = "H3N2_HA_2011_2013_{}_{}.nwk".format(N_leaves, subtree_fname_suffix)
    subtree_filename = os.path.join(subtrees_dir, subtree_fname_format)
    tree = flu_utils.subtree_with_same_root(tree_name, N_leaves, subtree_filename)
    # The sampler may not hit the requested size exactly -- report the
    # actual number of terminals.
    N_leaves = tree.count_terminals()
    return subtree_filename, N_leaves
if __name__ == "__main__":
    # CLI: N_leaves out_dir fname_suffix treetime_csv lsd_csv beast_csv [lsd_opts]
    N_leaves = int(sys.argv[1])
    out_dir = sys.argv[2]
    subtree_fname_suffix = sys.argv[3]
    treetime_res_file = sys.argv[4]
    lsd_res_file = sys.argv[5]
    beast_res_file = sys.argv[6]
    if len(sys.argv) > 7:
        # Extra LSD options arrive as one '|'-separated token.
        lsd_params = sys.argv[7].split("|")
    else:
        lsd_params = ['-c', '-r', 'a', '-v']
    # Sample subtree
    subtree_filename, N_leaves = sample_subtree(out_dir, N_leaves, subtree_fname_suffix)
    if RUN_TREETIME:
        # --- TreeTime: date the subtree, record Tmrca/clock rate/runtime.
        dates = flu_utils.dates_from_flu_tree(tree_name)
        myTree = treetime.TreeTime(gtr='Jukes-Cantor',
                                   tree=subtree_filename, aln=aln_name, dates=dates,
                                   debug=False, verbose=4)
        myTree.optimize_seq_and_branch_len(reuse_branch_len=True, prune_short=True, max_iter=5, infer_gtr=False)
        start = datetime.datetime.now()
        myTree.run(root='best', relaxed_clock=False, max_iter=3, resolve_polytomies=True, do_marginal=False)
        end = datetime.datetime.now()
        # Write the CSV header once; the bare except keeps concurrent runs
        # from crashing on a race.
        if not os.path.exists(treetime_res_file):
            try:
                with open(treetime_res_file, 'w') as of:
                    of.write("#Filename,N_leaves,Tmrca,Mu,R^2(initial clock),R^2(internal nodes),Runtime\n")
            except:
                pass
        with open(treetime_res_file, 'a') as of:
            of.write("{},{},{},{},{},{},{}\n".format(
                subtree_filename,
                str(N_leaves),
                str(myTree.tree.root.numdate),
                str(myTree.date2dist.clock_rate),
                str(myTree.date2dist.r_val),
                str(gen_utils.internal_regress(myTree)),
                str((end-start).total_seconds()) ))
        print ("TreeTime done!")
    else:
        print ("Skip TreeTime run")
    if RUN_LSD:
        # --- LSD: least-squares dating of the same subtree.
        lsd_outdir = os.path.join(out_dir, 'LSD_out')
        # run LSD for the subtree:
        if not os.path.exists(lsd_outdir):
            try:
                os.makedirs(lsd_outdir)
            except:
                pass
        lsd_outfile = os.path.join(lsd_outdir, os.path.split(subtree_filename)[-1].replace(".nwk", ".txt"))
        datesfile = os.path.join(lsd_outdir, os.path.split(subtree_filename)[-1].replace(".nwk", ".lsd_dates.txt"))
        flu_utils.create_LSD_dates_file_from_flu_tree(subtree_filename, datesfile)
        runtime = gen_utils.run_LSD(subtree_filename, datesfile, lsd_outfile, lsd_params)
        # parse LSD results
        tmrca, mu, objective = gen_utils.parse_lsd_output(lsd_outfile)
        try:
            # Record only runs with a positive clock rate; any parse failure
            # is swallowed so the remaining tools still run.
            if float(mu) > 0:
                if not os.path.exists(lsd_res_file):
                    try:
                        with open(lsd_res_file, 'w') as of:
                            of.write("#Filename,N_leaves,Tmrca,Mu,Runtime,Objective\n")
                    except:
                        pass
                with open(lsd_res_file, "a") as of:
                    of.write(",".join([subtree_filename, str(N_leaves), tmrca, mu, runtime, objective]))
                    of.write("\n")
        except:
            pass
        print ("LSD Done!")
    else:
        print ("Skip LSD run")
    if RUN_BEAST:
        _run_beast(N_leaves, subtree_filename, out_dir, beast_res_file)
| 2,311 | 0 | 46 |
7c28ca7cb6c7a885f4c0c44d4302c910b0fc490a | 347 | py | Python | clamped/__init__.py | jimbaker/clamped | 1dc1c3d673628929f8b746b3e58ef609ca6cfb1f | [
"Apache-2.0"
] | 9 | 2015-10-18T18:22:17.000Z | 2020-03-18T03:04:50.000Z | clamped/__init__.py | jimbaker/clamped | 1dc1c3d673628929f8b746b3e58ef609ca6cfb1f | [
"Apache-2.0"
] | 1 | 2018-07-20T05:39:49.000Z | 2018-07-20T05:39:49.000Z | clamped/__init__.py | jimbaker/clamped | 1dc1c3d673628929f8b746b3e58ef609ca6cfb1f | [
"Apache-2.0"
] | 2 | 2015-06-02T06:59:24.000Z | 2020-02-07T21:45:22.000Z | from six.moves import urllib
from java.io import Serializable
from java.util.concurrent import Callable
from clamp import clamp_base
BarBase = clamp_base("bar")
| 16.52381 | 48 | 0.697406 | from six.moves import urllib
from java.io import Serializable
from java.util.concurrent import Callable
from clamp import clamp_base
BarBase = clamp_base("bar")
# Jython 2 demo class: `clamp_base` (via BarBase) turns it into a
# Java-visible class implementing java.util.concurrent.Callable and
# java.io.Serializable.  Note: Python 2 print statements -- Jython only.
class BarClamp(BarBase, Callable, Serializable):
    def __init__(self):
        print "Being init-ed", self
    def call(self):
        # Callable.call() entry point; returns a plain int to the caller.
        print "Hello, world!"
        return 42
| 76 | 27 | 77 |
09ee2317f30d7d10b1af57d8ab94ef585b89715b | 3,995 | py | Python | autoload/python/test/test_session.py | iandingx/coqide.vim | 9aac4db31435a93844a5eb43dda1509754ba0ba6 | [
"MIT"
] | null | null | null | autoload/python/test/test_session.py | iandingx/coqide.vim | 9aac4db31435a93844a5eb43dda1509754ba0ba6 | [
"MIT"
] | 1 | 2020-02-07T18:20:43.000Z | 2020-02-07T18:23:32.000Z | autoload/python/test/test_session.py | iandingx/coqide.vim | 9aac4db31435a93844a5eb43dda1509754ba0ba6 | [
"MIT"
] | 1 | 2020-02-07T18:12:19.000Z | 2020-02-07T18:12:19.000Z | '''The unit test for module `coqide.session.Session`.'''
from unittest import TestCase
from unittest.mock import patch, Mock
from coqide.session import Session
from coqide.types import Mark, Sentence
# pylint: disable=W0212,C0103,R0201
class TestSession(TestCase):
'''Test for class `coqide.session.Session`.'''
@staticmethod
def _worker_mock():
'''Return a mock for worker.
It calls the function immediately the function is submitted.'''
worker = Mock()
worker.submit.side_effect = _submit
return worker
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_constr(self, STM, CoqtopInstance):
'''Test the constructor.'''
view = Mock()
vim = Mock()
worker = Mock()
session = Session(view, vim, worker)
CoqtopInstance.assert_called_once_with()
CoqtopInstance.return_value.spawn.assert_called_once_with(
['coqtop', '-ideslave', '-main-channel', 'stdfds',
'-async-proofs', 'on'])
STM.assert_called_once_with(
CoqtopInstance.return_value, view, session._on_feedback)
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_forward_one(self, STM, _):
'''Test method `forward_one`.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
sentence = Sentence('Proof.\n', Mark(1, 1), Mark(2, 1))
stm.get_tip_stop.side_effect = [Mark(1, 1)]
vim.get_sentence_after.side_effect = [sentence]
session.forward_one()
stm.add.assert_called_once_with([sentence])
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_backward_one(self, STM, _):
'''Test method `backward_one`.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
session.backward_one()
stm.edit_at_prev.assert_called_once_with()
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_to_cursor_forward(self, STM, _):
'''Test method `to_cursor` on going forward.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
sentences = [
Sentence('', Mark(2, 3), Mark(3, 5)),
Sentence('', Mark(3, 5), Mark(4, 1)),
Sentence('', Mark(4, 1), Mark(4, 9)),
None
]
stm.get_tip_stop.side_effect = [Mark(2, 3)]
stm.get_end_stop.side_effect = [Mark(2, 3)]
vim.get_cursor.side_effect = [Mark(4, 9)]
vim.get_sentence_after.side_effect = sentences
session.to_cursor()
stm.add.assert_called_once_with(sentences[:-1])
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_to_cursor_backward(self, STM, _):
'''Test method `to_cursor` on going backward.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
stm.get_tip_stop.side_effect = [Mark(4, 9)]
stm.get_end_stop.side_effect = [Mark(4, 9)]
vim.get_cursor.side_effect = [Mark(2, 3)]
session.to_cursor()
stm.edit_at.assert_called_once_with(Mark(2, 3))
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_close(self, _, CoqtopInstance):
'''Test method `close`.'''
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
session.close()
CoqtopInstance.return_value.close.assert_called_once_with()
| 31.96 | 71 | 0.608761 | '''The unit test for module `coqide.session.Session`.'''
from unittest import TestCase
from unittest.mock import patch, Mock
from coqide.session import Session
from coqide.types import Mark, Sentence
# pylint: disable=W0212,C0103,R0201
class TestSession(TestCase):
'''Test for class `coqide.session.Session`.'''
@staticmethod
def _worker_mock():
'''Return a mock for worker.
It calls the function immediately the function is submitted.'''
def _submit(func, *args, **kwargs):
func(*args, **kwargs)
worker = Mock()
worker.submit.side_effect = _submit
return worker
    @patch('coqide.session.CoqtopInstance')
    @patch('coqide.session.STM')
    def test_constr(self, STM, CoqtopInstance):
        '''Test the constructor.'''
        # Decorators apply bottom-up: STM is the first injected mock.
        view = Mock()
        vim = Mock()
        worker = Mock()
        session = Session(view, vim, worker)
        # A coqtop process must be spawned in ideslave/stdfds mode...
        CoqtopInstance.assert_called_once_with()
        CoqtopInstance.return_value.spawn.assert_called_once_with(
            ['coqtop', '-ideslave', '-main-channel', 'stdfds',
             '-async-proofs', 'on'])
        # ...and wrapped in an STM that reports feedback to the session.
        STM.assert_called_once_with(
            CoqtopInstance.return_value, view, session._on_feedback)
    @patch('coqide.session.CoqtopInstance')
    @patch('coqide.session.STM')
    def test_forward_one(self, STM, _):
        '''Test method `forward_one`.'''
        stm = STM.return_value
        view = Mock()
        vim = Mock()
        worker = self._worker_mock()
        session = Session(view, vim, worker)
        # One sentence sits after the current tip at (1,1)...
        sentence = Sentence('Proof.\n', Mark(1, 1), Mark(2, 1))
        stm.get_tip_stop.side_effect = [Mark(1, 1)]
        vim.get_sentence_after.side_effect = [sentence]
        session.forward_one()
        # ...and must be handed to the STM.
        stm.add.assert_called_once_with([sentence])
    @patch('coqide.session.CoqtopInstance')
    @patch('coqide.session.STM')
    def test_backward_one(self, STM, _):
        '''Test method `backward_one`.'''
        stm = STM.return_value
        view = Mock()
        vim = Mock()
        worker = self._worker_mock()
        session = Session(view, vim, worker)
        session.backward_one()
        # Stepping back one sentence delegates straight to the STM.
        stm.edit_at_prev.assert_called_once_with()
    @patch('coqide.session.CoqtopInstance')
    @patch('coqide.session.STM')
    def test_to_cursor_forward(self, STM, _):
        '''Test method `to_cursor` on going forward.'''
        stm = STM.return_value
        view = Mock()
        vim = Mock()
        worker = self._worker_mock()
        session = Session(view, vim, worker)
        # Sentences between the tip (2,3) and the cursor (4,9); the trailing
        # None signals "no further sentence".
        sentences = [
            Sentence('', Mark(2, 3), Mark(3, 5)),
            Sentence('', Mark(3, 5), Mark(4, 1)),
            Sentence('', Mark(4, 1), Mark(4, 9)),
            None
        ]
        stm.get_tip_stop.side_effect = [Mark(2, 3)]
        stm.get_end_stop.side_effect = [Mark(2, 3)]
        vim.get_cursor.side_effect = [Mark(4, 9)]
        vim.get_sentence_after.side_effect = sentences
        session.to_cursor()
        # Everything up to the cursor (excluding the None) is added.
        stm.add.assert_called_once_with(sentences[:-1])
    @patch('coqide.session.CoqtopInstance')
    @patch('coqide.session.STM')
    def test_to_cursor_backward(self, STM, _):
        '''Test method `to_cursor` on going backward.'''
        stm = STM.return_value
        view = Mock()
        vim = Mock()
        worker = self._worker_mock()
        session = Session(view, vim, worker)
        # Cursor (2,3) lies before the tip (4,9) -> must rewind, not add.
        stm.get_tip_stop.side_effect = [Mark(4, 9)]
        stm.get_end_stop.side_effect = [Mark(4, 9)]
        vim.get_cursor.side_effect = [Mark(2, 3)]
        session.to_cursor()
        stm.edit_at.assert_called_once_with(Mark(2, 3))
    @patch('coqide.session.CoqtopInstance')
    @patch('coqide.session.STM')
    def test_close(self, _, CoqtopInstance):
        '''Test method `close`.'''
        # Note the reversed parameter order: here the STM mock is unused (`_`).
        view = Mock()
        vim = Mock()
        worker = self._worker_mock()
        session = Session(view, vim, worker)
        session.close()
        # Closing the session must terminate the coqtop instance.
        CoqtopInstance.return_value.close.assert_called_once_with()
| 48 | 0 | 30 |
0f4a8214318533b88ab9211ff0e3739fdb71d5fd | 1,380 | py | Python | panbox/_configs/panmodel_configs.py | ag-ds-bubble/panbox | 8fb9854fd3c9a931b818bd51781e6d4fb046d580 | [
"MIT"
] | null | null | null | panbox/_configs/panmodel_configs.py | ag-ds-bubble/panbox | 8fb9854fd3c9a931b818bd51781e6d4fb046d580 | [
"MIT"
] | null | null | null | panbox/_configs/panmodel_configs.py | ag-ds-bubble/panbox | 8fb9854fd3c9a931b818bd51781e6d4fb046d580 | [
"MIT"
] | null | null | null | from .core_configs import *
panmodel_configs = {
'general.pandemic.name' : 'Pandemic',
#SIR
'params.sir.tpop.val' : 7_75_66_886,
'params.sir.tr.cr.es': {},
'params.sir.tr.pr.es': {},
'params.sir.tr.cr.etd': 10,
'params.sir.tr.pr.etd': 10,
'params.sir.tr.cr.val' : 0.6, # social distancing
'params.sir.tr.pr.val' : 0.2, # better handwashing
'params.sir.in.val' : 30,
'params.sir.rm.recov.es': {},
'params.sir.rm.recov.etd': 10,
'params.sir.rm.recov.val': 0.01,
'params.sir.rm.val' : 0
}
StartDateErr = '''The '_start_date' format should be only of %Y-%m-%d format.'''
ProjectionErr = '''The '_projections_till' parameter takes period only in the format of : \
1 Day/ 198 Days/ 1 Month/ 3 Months/ 1 Year/ 2 Years'''
ProjectionGranErr1 = '''The '_projection_granularity' parameter can only be \
one of : ['Days', 'Months', 'Years']'''
ProjectionGranErr2 = '''With '_start_date' as {0}, and '_projections_till' set to {1}, granularity \
of {2} is not possible. '_projection_granularity' can be only one of {3} '''
| 39.428571 | 100 | 0.500725 | from .core_configs import *
panmodel_configs = {
'general.pandemic.name' : 'Pandemic',
#SIR
'params.sir.tpop.val' : 7_75_66_886,
'params.sir.tr.cr.es': {},
'params.sir.tr.pr.es': {},
'params.sir.tr.cr.etd': 10,
'params.sir.tr.pr.etd': 10,
'params.sir.tr.cr.val' : 0.6, # social distancing
'params.sir.tr.pr.val' : 0.2, # better handwashing
'params.sir.in.val' : 30,
'params.sir.rm.recov.es': {},
'params.sir.rm.recov.etd': 10,
'params.sir.rm.recov.val': 0.01,
'params.sir.rm.val' : 0
}
StartDateErr = '''The '_start_date' format should be only of %Y-%m-%d format.'''
ProjectionErr = '''The '_projections_till' parameter takes period only in the format of : \
1 Day/ 198 Days/ 1 Month/ 3 Months/ 1 Year/ 2 Years'''
ProjectionGranErr1 = '''The '_projection_granularity' parameter can only be \
one of : ['Days', 'Months', 'Years']'''
ProjectionGranErr2 = '''With '_start_date' as {0}, and '_projections_till' set to {1}, granularity \
of {2} is not possible. '_projection_granularity' can be only one of {3} '''
| 0 | 0 | 0 |
63e6eb978c0673466d9c3f4df98abc70fc1027f4 | 1,823 | py | Python | pybluepedal/services/heart_rate.py | willful-it/py-blue-pedal | 9c43b0d1cd96b06ad05fb8cd82be557f1401310f | [
"Unlicense"
] | 1 | 2021-03-06T15:35:25.000Z | 2021-03-06T15:35:25.000Z | pybluepedal/services/heart_rate.py | willful-it/py-blue-pedal | 9c43b0d1cd96b06ad05fb8cd82be557f1401310f | [
"Unlicense"
] | null | null | null | pybluepedal/services/heart_rate.py | willful-it/py-blue-pedal | 9c43b0d1cd96b06ad05fb8cd82be557f1401310f | [
"Unlicense"
] | null | null | null | import logging
import queue
from bluepy.btle import Peripheral
from pybluepedal.common.base import BaseDelegate, BaseService
from pybluepedal.common.byte_ops import check_bit_l2r
logger = logging.getLogger("HeartRateService")
| 29.403226 | 73 | 0.642896 | import logging
import queue
from bluepy.btle import Peripheral
from pybluepedal.common.base import BaseDelegate, BaseService
from pybluepedal.common.byte_ops import check_bit_l2r
logger = logging.getLogger("HeartRateService")
class HeartRateService(BaseService):
UUID = "0000180d"
CHARACTERISTIC_MEASUREMENT = "00002a37"
def __init__(self, peripheral: Peripheral):
super().__init__(peripheral, HeartRateService.UUID)
def start_notifications(self, delegate: BaseDelegate):
"""Starts the notifications for the characteristic measurement"""
logger.debug("starting notification")
self._peripheral.setDelegate(delegate)
characteristic = self._service.getCharacteristics(
forUUID=HeartRateService.CHARACTERISTIC_MEASUREMENT)[0]
resp = self._peripheral.writeCharacteristic(
characteristic.getHandle() + 1, b"\x01\x00", True)
logger.debug(f"notification started: {resp}")
class HeartRateDelegate(BaseDelegate):
def __init__(self, producer_queue: queue.Queue):
super().__init__(producer_queue)
self._producer_queue = producer_queue
def handleNotification(self, cHandle, data):
logger.debug(f"handing notification {cHandle} {data}")
values = list(bytearray(data))
flag_field = values[0]
logger.debug(f"flag field {bin(flag_field)}")
if not check_bit_l2r(flag_field, 0):
data = {
"type": "HeartRate",
"handle": cHandle,
"value": values[1]
}
else:
data = {
"type": "HeartRate",
"handle": cHandle,
"value": values[1:]
}
self._producer_queue.put(data)
logger.debug(f"added to queue {data}")
| 840 | 654 | 99 |
6f3bf134422f1498e208f9347e591c557c68c4f5 | 4,051 | py | Python | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/languages/tests/test_language_add.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/languages/tests/test_language_add.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/languages/tests/test_language_add.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as language_utils
from unittest.mock import patch
| 43.095745 | 79 | 0.586028 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as language_utils
from unittest.mock import patch
class LanguagesAddTestCase(BaseTestGenerator):
skip_on_database = ['gpdb']
scenarios = utils.generate_scenarios('create_language',
language_utils.test_cases)
def setUp(self):
super(LanguagesAddTestCase, self).setUp()
db_user = self.server['username']
self.data = self.test_data
self.data['name'] = "language_%s" % str(uuid.uuid4())[1:8]
self.data['lanowner'] = db_user
self.server_data = parent_node_dict["database"][-1]
self.server_id = self.server_data["server_id"]
self.db_id = self.server_data['db_id']
self.db_name = self.server_data["db_name"]
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
def runTest(self):
"""This function will add language under test database."""
actual_status_code = ''
expected_status_code = ''
if self.is_positive_test:
response = self.create_language()
actual_status_code = response.status_code
expected_output = language_utils.verify_language(self)
expected_status_code = self.expected_data["status_code"]
self.assertDictEqual(expected_output, self.data)
else:
if hasattr(self, "missing_name"):
del self.data["name"]
response = self.create_language()
actual_status_code = response.status_code
expected_status_code = self.expected_data["status_code"]
if hasattr(self, "missing_lang_pack"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.create_language()
actual_status_code = response.status_code
expected_status_code = self.expected_data["status_code"]
if hasattr(self, "error_in_properties"):
with patch(self.mock_data["function_name"],
side_effect=[eval(self.mock_data["return_value"])]):
response = self.create_language()
actual_status_code = response.status_code
expected_status_code = self.expected_data["status_code"]
self.assertEqual(actual_status_code, expected_status_code)
def create_language(self):
"""This function will add language under test database."""
return self.tester.post(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) + '/',
data=json.dumps(self.data),
content_type='html/json')
def tearDown(self):
"""This function delete added language and
disconnect the test database."""
if self.is_positive_test or hasattr(self, "error_in_properties"):
language_utils.delete_language(
self.server, self.db_name, self.data['name'])
database_utils.disconnect_database(self, self.server_id,
self.db_id)
| 810 | 2,571 | 23 |
b8d5cbd5e4d57f50f9bbd96a1d946505afdfdf58 | 2,033 | py | Python | gs/util/compat.py | chanzuckerberg/gs | 5824f855c0e28146629cc77d10b87dbac9960c03 | [
"MIT"
] | 5 | 2019-02-06T19:15:53.000Z | 2020-12-17T16:40:20.000Z | gs/util/compat.py | chanzuckerberg/gs | 5824f855c0e28146629cc77d10b87dbac9960c03 | [
"MIT"
] | 1 | 2019-02-13T16:13:08.000Z | 2019-02-19T14:50:02.000Z | gs/util/compat.py | chanzuckerberg/gs | 5824f855c0e28146629cc77d10b87dbac9960c03 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, datetime, errno, threading
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
if USING_PYTHON2:
from multiprocessing import cpu_count
from thread import get_ident
from StringIO import StringIO
from repr import Repr
str = unicode # noqa
from ..packages.backports.functools_lru_cache import lru_cache
from ..packages.backports.shutil_get_terminal_size import get_terminal_size
from ..packages.backports.tempfile import TemporaryDirectory
else:
from threading import get_ident
from io import StringIO
from reprlib import Repr
str = str
from functools import lru_cache
from shutil import get_terminal_size
from tempfile import TemporaryDirectory
from os import makedirs, cpu_count
from statistics import median
timestamp = datetime.datetime.timestamp
| 34.457627 | 120 | 0.654697 | from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, datetime, errno, threading
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
if USING_PYTHON2:
from multiprocessing import cpu_count
from thread import get_ident
from StringIO import StringIO
from repr import Repr
str = unicode # noqa
from ..packages.backports.functools_lru_cache import lru_cache
from ..packages.backports.shutil_get_terminal_size import get_terminal_size
from ..packages.backports.tempfile import TemporaryDirectory
def makedirs(name, mode=0o777, exist_ok=False):
try:
os.makedirs(name, mode)
except OSError as e:
if not (exist_ok and e.errno == errno.EEXIST and os.path.isdir(name)):
raise
def median(data):
data = sorted(data)
n = len(data)
if n == 0:
raise Exception("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2
def timestamp(dt):
if dt.tzinfo is None:
from time import mktime
return mktime((dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, -1, -1, -1)) + dt.microsecond / 1e6
else:
from dateutil.tz import tzutc
return (dt - datetime.datetime(1970, 1, 1, tzinfo=tzutc())).total_seconds()
def thread_is_main():
return True if threading.current_thread().name == "MainThread" else False
else:
from threading import get_ident
from io import StringIO
from reprlib import Repr
str = str
from functools import lru_cache
from shutil import get_terminal_size
from tempfile import TemporaryDirectory
from os import makedirs, cpu_count
from statistics import median
timestamp = datetime.datetime.timestamp
def thread_is_main():
return True if threading.current_thread() is threading.main_thread() else False
| 967 | 0 | 135 |
16eb0d18d587adaecba29e31054cc2160b7c292a | 1,611 | py | Python | pcat2py/class/20fb7dcc-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/20fb7dcc-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/20fb7dcc-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
################################################################################
# 20fb7dcc-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
| 38.357143 | 186 | 0.63563 | #!/usr/bin/python
################################################################################
# 20fb7dcc-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20fb7dcc-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = True
# Get Registry MultiSZ
multi_sz = cli.get_reg_multi_sz(r'HKLM:\SYSTEM\CurrentControlSet\services\LanmanServer\Parameters', 'NullSessionPipes')
# Output Lines
self.output = [r'HKLM:\SYSTEM\CurrentControlSet\services\LanmanServer\Parameters', ('NullSessionPipes=')] + multi_sz
# Recommended MultiSZ
rec_multi_sz = ("NETLOGON,SAMR,LSARPC")
for sz in multi_sz:
if sz.lower() not in rec_multi_sz.lower():
self.is_compliant = False
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\services'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\services\LanmanServer'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\services\LanmanServer\Parameters'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\SYSTEM\CurrentControlSet\services\LanmanServer\Parameters' -name 'NullSessionPipes' -Type MultiString -value NETLOGON,SAMR,LSARPC")
| 1,170 | -7 | 111 |
cbb50e1d61bfd9f3545b523ccbfb485ff0d35dd8 | 2,209 | py | Python | tests/tools/calculations/test_pw.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 40 | 2017-09-25T20:22:43.000Z | 2022-02-21T02:53:41.000Z | tests/tools/calculations/test_pw.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 594 | 2017-08-08T17:28:52.000Z | 2022-03-28T13:38:10.000Z | tests/tools/calculations/test_pw.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 66 | 2017-08-08T16:58:56.000Z | 2022-03-17T10:18:43.000Z | # -*- coding: utf-8 -*-
"""Tests for the `PwCalculationTools` class."""
import numpy as np
import pytest
from aiida import orm
from aiida.common.links import LinkType
def test_pw_get_scf_accuracy(fixture_localhost, generate_calc_job_node):
"""Test the `PwCalculationTools.get_scf_accuracy` method."""
entry_point_name = 'quantumespresso.pw'
# Missing `output_trajectory` node
node = generate_calc_job_node(entry_point_name, fixture_localhost)
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
# Missing `scf_accuracy` array
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
# Missing `scf_accuracy_index` array
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.set_array('scf_accuracy', np.array([1, 1, 1, 2, 2, 2, 2, 2]))
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.set_array('scf_accuracy', np.array([1, 1, 1, 2, 2, 2, 2, 2]))
trajectory.set_array('scf_iterations', np.array([3, 5]))
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
# Invalid indices, there are only two frames
with pytest.raises(IndexError):
node.tools.get_scf_accuracy(index=2)
with pytest.raises(IndexError):
node.tools.get_scf_accuracy(index=-3)
assert np.array_equal(node.tools.get_scf_accuracy(index=0), np.array([1, 1, 1]))
assert np.array_equal(node.tools.get_scf_accuracy(index=1), np.array([2, 2, 2, 2, 2]))
assert np.array_equal(node.tools.get_scf_accuracy(index=-1), np.array([2, 2, 2, 2, 2]))
assert np.array_equal(node.tools.get_scf_accuracy(index=-2), np.array([1, 1, 1]))
| 40.163636 | 92 | 0.727026 | # -*- coding: utf-8 -*-
"""Tests for the `PwCalculationTools` class."""
import numpy as np
import pytest
from aiida import orm
from aiida.common.links import LinkType
def test_pw_get_scf_accuracy(fixture_localhost, generate_calc_job_node):
"""Test the `PwCalculationTools.get_scf_accuracy` method."""
entry_point_name = 'quantumespresso.pw'
# Missing `output_trajectory` node
node = generate_calc_job_node(entry_point_name, fixture_localhost)
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
# Missing `scf_accuracy` array
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
# Missing `scf_accuracy_index` array
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.set_array('scf_accuracy', np.array([1, 1, 1, 2, 2, 2, 2, 2]))
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.set_array('scf_accuracy', np.array([1, 1, 1, 2, 2, 2, 2, 2]))
trajectory.set_array('scf_iterations', np.array([3, 5]))
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
# Invalid indices, there are only two frames
with pytest.raises(IndexError):
node.tools.get_scf_accuracy(index=2)
with pytest.raises(IndexError):
node.tools.get_scf_accuracy(index=-3)
assert np.array_equal(node.tools.get_scf_accuracy(index=0), np.array([1, 1, 1]))
assert np.array_equal(node.tools.get_scf_accuracy(index=1), np.array([2, 2, 2, 2, 2]))
assert np.array_equal(node.tools.get_scf_accuracy(index=-1), np.array([2, 2, 2, 2, 2]))
assert np.array_equal(node.tools.get_scf_accuracy(index=-2), np.array([1, 1, 1]))
| 0 | 0 | 0 |
6d5b73ad947f1261dcb7b92c74dbe5c872b08d17 | 6,498 | py | Python | tests/unittests/test_url.py | jcohen02/radical.utils | a6ecd7a756fa8e60c819aedd82004845f3f19c5b | [
"Apache-2.0"
] | null | null | null | tests/unittests/test_url.py | jcohen02/radical.utils | a6ecd7a756fa8e60c819aedd82004845f3f19c5b | [
"Apache-2.0"
] | null | null | null | tests/unittests/test_url.py | jcohen02/radical.utils | a6ecd7a756fa8e60c819aedd82004845f3f19c5b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils as ru
from radical.utils.contrib.urlparse25 import urljoin
# ------------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
# run tests if called directly
if __name__ == "__main__":
test_contrib()
test_url_api()
test_url_scheme_issue()
test_url_issue_49()
test_url_issue_61()
test_url_issue_rs_305()
test_url_properties()
# ------------------------------------------------------------------------------
| 30.796209 | 80 | 0.401662 | #!/usr/bin/env python
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils as ru
from radical.utils.contrib.urlparse25 import urljoin
# ------------------------------------------------------------------------------
#
def test_contrib():
test_cases = [('http://a/b/c/d', '' ),
('g:h', 'g:h' ),
('http:g', 'http://a/b/c/g' ),
('http:', 'http://a/b/c/d' ),
('g', 'http://a/b/c/g' ),
('./g', 'http://a/b/c/g' ),
('g/', 'http://a/b/c/g/' ),
('/g', 'http://a/g' ),
('//g', 'http://g' ),
('?y', 'http://a/b/c/?y' ), # [1]
('g?y', 'http://a/b/c/g?y' ),
('g?y/./x', 'http://a/b/c/g?y/./x'),
('.', 'http://a/b/c/' ),
('./', 'http://a/b/c/' ),
('..', 'http://a/b/' ),
('../', 'http://a/b/' ),
('../g', 'http://a/b/g' ),
('../..', 'http://a/' ),
('../../g', 'http://a/g' ),
('../../../g', 'http://a/../g' ),
('./../g', 'http://a/b/g' ),
('./g/.', 'http://a/b/c/g/' ),
('/./g', 'http://a/./g' ),
('g/./h', 'http://a/b/c/g/h' ),
('g/../h', 'http://a/b/c/h' ),
('http:g', 'http://a/b/c/g' ),
('http:', 'http://a/b/c/d' ),
('http:?y', 'http://a/b/c/?y' ), # [1]
('http:g?y', 'http://a/b/c/g?y' ),
('http:g?y/./x', 'http://a/b/c/g?y/./x')]
# [1] https://bugs.python.org/issue18828 - open since 2013 :-/
# This test case *should* result in `http://a/b/c/d?y`
base = ''
for tc in test_cases:
url = tc[0]
check = tc[1]
result = urljoin(base, url)
if check:
assert(result == check), '%s == %s' % (result, check)
if not base:
base = result
# -------------------------------------------------------------------------
#
def test_url_api():
# test basic functionality for valid schemas
u1 = ru.Url("ssh://user:pwd@hostname.domain:9999/path")
assert u1.scheme == "ssh"
assert u1.username == "user"
assert u1.password == "pwd"
assert u1.host == "hostname.domain"
assert u1.port == int(9999)
# ------------------------------------------------------------------------------
#
def test_url_scheme_issue():
# test basic functionality for invalid schemas
u1 = ru.Url("unknownscheme://user:pwd@hostname.domain:9999/path")
assert u1.scheme == "unknownscheme"
assert u1.username == "user"
assert u1.password == "pwd"
assert u1.host == "hostname.domain"
assert u1.port == int(9999)
# ------------------------------------------------------------------------------
#
def test_url_issue_49():
# ensure correct str serialization after setting elements
url = ru.Url ("scheme://pass:user@host:123/dir/file?query#fragment")
url.set_host ('remote.host.net')
url.set_scheme ('sftp')
url.set_path ('/tmp/data')
assert str(url) == "sftp://pass:user@remote.host.net:123/tmp/data"
# ------------------------------------------------------------------------------
#
def test_url_issue_61():
# ensure correct query extraction
url = ru.Url ("advert://localhost/?dbtype=sqlite3")
assert url.query == "dbtype=sqlite3"
# ------------------------------------------------------------------------------
#
def test_url_issue_rs_305():
# This compensates
#
# >>> import os
# >>> os.path.normpath('//path//to//dir//')
# '//path/to/dir'
#
# to a normalization resulting in
#
# '/path/to/dir'
#
# as required by the SAGA spec
url1 = ru.Url ("advert://localhost/path/to/file")
url2 = ru.Url ("advert://localhost//path/to/file")
assert url1.path == url2.path
# ------------------------------------------------------------------------------
#
def test_url_properties():
# test various properties
url = ru.Url("")
assert str(url) == ""
url.scheme = "scheme"
assert str(url) == "scheme://"
assert url.get_scheme() == "scheme"
url.set_scheme("tscheme")
assert url.get_scheme() == "tscheme"
url.scheme = "scheme"
url.host = "host"
assert str(url) == "scheme://host"
assert url.get_host() == "host"
url.set_host("thost")
assert url.get_host() == "thost"
url.host = "host"
url.port = 42
assert str(url) == "scheme://host:42"
assert url.get_port() == 42
url.set_port(43)
assert url.get_port() == 43
url.port = 42
url.username = "username"
assert str(url) == "scheme://username@host:42"
assert url.get_username() == "username"
url.set_username("tusername")
assert url.get_username() == "tusername"
url.username = "username"
url.password = "password"
assert str(url) == "scheme://username:password@host:42"
assert url.get_password() == "password"
url.set_password("tpassword")
assert url.get_password() == "tpassword"
url.password = "password"
url.path = "/path/"
assert str(url) == "scheme://username:password@host:42/path/"
assert url.get_path() == "/path/"
url.set_path("tpath")
assert url.get_path() == "/tpath"
# ------------------------------------------------------------------------------
# run tests if called directly
if __name__ == "__main__":
test_contrib()
test_url_api()
test_url_scheme_issue()
test_url_issue_49()
test_url_issue_61()
test_url_issue_rs_305()
test_url_properties()
# ------------------------------------------------------------------------------
| 5,131 | 0 | 154 |
dffd07e4ef8d4ea764ecfe4a3e13d91ffedad911 | 93 | py | Python | trabalhos/apps.py | Auralcat/TrabalhoRemoto | e6294e30a0e37ec68b299ab7983f5328c1515267 | [
"MIT"
] | null | null | null | trabalhos/apps.py | Auralcat/TrabalhoRemoto | e6294e30a0e37ec68b299ab7983f5328c1515267 | [
"MIT"
] | null | null | null | trabalhos/apps.py | Auralcat/TrabalhoRemoto | e6294e30a0e37ec68b299ab7983f5328c1515267 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.5 | 33 | 0.763441 | from django.apps import AppConfig
class TrabalhosConfig(AppConfig):
name = 'trabalhos'
| 0 | 35 | 23 |
20b843e2516177293c8dc9ac88eff5618b233354 | 1,130 | py | Python | tests/cli/config/test_config_show.py | EddLabs/eddington-static | cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5 | [
"Apache-2.0"
] | null | null | null | tests/cli/config/test_config_show.py | EddLabs/eddington-static | cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5 | [
"Apache-2.0"
] | null | null | null | tests/cli/config/test_config_show.py | EddLabs/eddington-static | cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5 | [
"Apache-2.0"
] | null | null | null | from statue.cli import statue_cli
from statue.constants import ENCODING
| 31.388889 | 88 | 0.70177 | from statue.cli import statue_cli
from statue.constants import ENCODING
def test_config_show_default_configuration(mock_configuration_path, cli_runner):
text = """
This text should be written to file
It contains multiple lines
and new lines
"""
with open(mock_configuration_path.return_value, mode="w", encoding=ENCODING) as fd:
fd.write(text)
result = cli_runner.invoke(statue_cli, ["config", "show"])
assert result.exit_code == 0
assert result.output == text + "\n"
mock_configuration_path.assert_called_once_with()
def test_config_show_given_configuration(tmp_path, mock_configuration_path, cli_runner):
config_path = tmp_path / "statue.toml"
text = """
This text should be written to file
It contains multiple lines
and new lines
"""
with open(config_path, mode="w", encoding=ENCODING) as fd:
fd.write(text)
result = cli_runner.invoke(
statue_cli, ["config", "show", "--config", str(config_path)]
)
assert result.exit_code == 0
assert result.output == text + "\n"
mock_configuration_path.assert_not_called()
| 1,010 | 0 | 46 |
db6e72dfd5b85e181f7e183968d2e8b523d2deab | 685 | py | Python | django_xsede_warehouse/xcsr_db/urls.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | 1 | 2019-10-29T22:50:29.000Z | 2019-10-29T22:50:29.000Z | django_xsede_warehouse/xcsr_db/urls.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | null | null | null | django_xsede_warehouse/xcsr_db/urls.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from xcsr_db.views import *
# Define our custom URLs
# Additionally, we include login URLs for the browseable API.
urlpatterns = [
url(r'^componentsprequirement/$', ComponentSPRequirement_List.as_view(), name='componentsprequirement-list'),
url(r'^componentsprequirement/component/(?P<component>[^/]+)/spclass/(?P<spclass>[^/]+)/$', ComponentSPRequirement_Detail.as_view(), name='componentsprequirement-detail'),
url(r'^supportcontacts/$', SupportContacts_List.as_view(), name='supportcontacts-list'),
url(r'^supportcontacts/globalid/(?P<globalid>[^/]+)/$', SupportContacts_Detail.as_view(), name='supportcontacts-detail'),
]
| 57.083333 | 175 | 0.747445 | from django.conf.urls import include, url
from xcsr_db.views import *
# Define our custom URLs
# Additionally, we include login URLs for the browseable API.
urlpatterns = [
url(r'^componentsprequirement/$', ComponentSPRequirement_List.as_view(), name='componentsprequirement-list'),
url(r'^componentsprequirement/component/(?P<component>[^/]+)/spclass/(?P<spclass>[^/]+)/$', ComponentSPRequirement_Detail.as_view(), name='componentsprequirement-detail'),
url(r'^supportcontacts/$', SupportContacts_List.as_view(), name='supportcontacts-list'),
url(r'^supportcontacts/globalid/(?P<globalid>[^/]+)/$', SupportContacts_Detail.as_view(), name='supportcontacts-detail'),
]
| 0 | 0 | 0 |
452382ac31688b725addd4afe9f288bab89266d4 | 804 | py | Python | bp_acceptance_tests/level_1/__init__.py | nicholas-moreles/blaspy | c4af6258e17dd996c4b6d90bbaae15b31b8702b4 | [
"BSD-3-Clause"
] | 4 | 2015-01-25T12:44:44.000Z | 2022-03-19T08:36:19.000Z | bp_acceptance_tests/level_1/__init__.py | nicholas-moreles/blaspy | c4af6258e17dd996c4b6d90bbaae15b31b8702b4 | [
"BSD-3-Clause"
] | 7 | 2015-01-20T13:35:39.000Z | 2015-05-31T17:11:50.000Z | bp_acceptance_tests/level_1/__init__.py | nicholas-moreles/blaspy | c4af6258e17dd996c4b6d90bbaae15b31b8702b4 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2014-2015-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .acceptance_test_amax import acceptance_test_amax
from .acceptance_test_asum import acceptance_test_asum
from .acceptance_test_axpy import acceptance_test_axpy
from .acceptance_test_copy import acceptance_test_copy
from .acceptance_test_dot import acceptance_test_dot
from .acceptance_test_nrm2 import acceptance_test_nrm2
from .acceptance_test_scal import acceptance_test_scal
from .acceptance_test_sdot import acceptance_test_sdot
from .acceptance_test_swap import acceptance_test_swap
| 38.285714 | 72 | 0.830846 | """
Copyright (c) 2014-2015-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .acceptance_test_amax import acceptance_test_amax
from .acceptance_test_asum import acceptance_test_asum
from .acceptance_test_axpy import acceptance_test_axpy
from .acceptance_test_copy import acceptance_test_copy
from .acceptance_test_dot import acceptance_test_dot
from .acceptance_test_nrm2 import acceptance_test_nrm2
from .acceptance_test_scal import acceptance_test_scal
from .acceptance_test_sdot import acceptance_test_sdot
from .acceptance_test_swap import acceptance_test_swap
| 0 | 0 | 0 |
94b6802896641617a4b66c4e49fe3e52b303aaca | 296 | py | Python | fartor/urls/api_v1.py | verkatech/fartor-django | 72b53fdbe7d2e8744d5bfc58907ee2ca4a031c65 | [
"MIT"
] | 6 | 2019-02-25T12:51:38.000Z | 2019-04-21T17:53:42.000Z | fartor/urls/api_v1.py | verkatech/fartor-django | 72b53fdbe7d2e8744d5bfc58907ee2ca4a031c65 | [
"MIT"
] | null | null | null | fartor/urls/api_v1.py | verkatech/fartor-django | 72b53fdbe7d2e8744d5bfc58907ee2ca4a031c65 | [
"MIT"
] | null | null | null | from django.urls import path
from fartor.apps.accounting.users.actions.login import LoginRestAPI
from fartor.apps.accounting.users.actions.self import SelfRestAPI
urlpatterns = [
# user login
path('auth/login/', LoginRestAPI.as_view()),
path('auth/self/', SelfRestAPI.as_view()),
]
| 26.909091 | 67 | 0.75 | from django.urls import path
from fartor.apps.accounting.users.actions.login import LoginRestAPI
from fartor.apps.accounting.users.actions.self import SelfRestAPI
urlpatterns = [
# user login
path('auth/login/', LoginRestAPI.as_view()),
path('auth/self/', SelfRestAPI.as_view()),
]
| 0 | 0 | 0 |
99a6869bf37c706e8fe20dec97f3665727336051 | 1,461 | py | Python | iutest/plugins/nose2plugins/duplicationremoval.py | mgland/iutest | 6bad2b9bdd696a43580741da4237fc21b2880c6c | [
"MIT"
] | 10 | 2020-09-11T12:38:37.000Z | 2021-09-24T04:21:33.000Z | iutest/plugins/nose2plugins/duplicationremoval.py | mgland/iutest | 6bad2b9bdd696a43580741da4237fc21b2880c6c | [
"MIT"
] | 4 | 2020-08-24T01:46:15.000Z | 2021-04-20T21:07:41.000Z | iutest/plugins/nose2plugins/duplicationremoval.py | mgland/iutest | 6bad2b9bdd696a43580741da4237fc21b2880c6c | [
"MIT"
] | 1 | 2021-06-29T10:24:51.000Z | 2021-06-29T10:24:51.000Z | # Copyright 2019-2020 by Wenfeng Gao, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
class TestsDuplicationRemovalHooks(object):
""" Remove potentially duplicated tests collected.
Notes:
If the DiscoveryLoader and the EggDiscoveryLoader plugins are enabled at the same time,
there will be duplicated tests discovered as they both call _find_tests_in_module() which
will discover tests no matter it is egg or not.
Since Nose2 uses alphabetical order or plugin module paths to decide which plugin
to load first but to remove duplicated test we need to ensure the plugin comes after
other discovery plugin. Thus we need to use hooks instead of plugin.
"""
def loadTestsFromName(self, event):
"""Load tests from module named by event.name.
Notes:
This is where the EggDiscoveryLoader plugin introduce the duplicated plugin.
"""
event.extraTests = self._removeDuplicate(event.extraTests)
@classmethod
| 39.486486 | 97 | 0.691307 | # Copyright 2019-2020 by Wenfeng Gao, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
class TestsDuplicationRemovalHooks(object):
    """Hook object that strips duplicated tests from a discovery event.

    When both the DiscoveryLoader and the EggDiscoveryLoader plugins are
    enabled, each of them calls _find_tests_in_module() and the same test
    can be collected twice.  Nose2 orders plugins alphabetically by module
    path, so a plugin cannot be guaranteed to run after both discovery
    plugins; registering this class as a plain hook sidesteps that
    ordering problem.
    """
    def _removeDuplicate(self, tests):
        # Order-preserving de-duplication; test objects may be unhashable,
        # so membership is checked against the result list instead of a set.
        deduped = []
        for test in tests:
            if test in deduped:
                continue
            deduped.append(test)
        return deduped
    def loadTestsFromName(self, event):
        """Drop duplicate entries from the tests attached to *event*.

        Runs after the discovery plugins have populated ``event.extraTests``,
        which is where EggDiscoveryLoader introduces the duplicates.
        """
        event.extraTests = self._removeDuplicate(event.extraTests)
    @classmethod
    def getHooks(cls):
        """Return the (hook-name, hook-object) pairs to register with nose2."""
        instance = cls()
        return [("loadTestsFromName", instance)]
4eb7f662a1f557f8041397153aab319bbb9ba479 | 555 | py | Python | kerasltisubmission/__init__.py | into-ai/kerasltisubmission | ca258e258e208eaf90b5bc4f408d3f9904993de1 | [
"MIT"
] | 1 | 2020-03-11T19:50:14.000Z | 2020-03-11T19:50:14.000Z | kerasltisubmission/__init__.py | into-ai/kerasltisubmission | ca258e258e208eaf90b5bc4f408d3f9904993de1 | [
"MIT"
] | null | null | null | kerasltisubmission/__init__.py | into-ai/kerasltisubmission | ca258e258e208eaf90b5bc4f408d3f9904993de1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for kerasltisubmission."""
__author__ = """into-ai"""
__email__ = "introintoai@gmail.com"
__version__ = "0.4.9"
from kerasltisubmission.kerasltisubmission import Submission as _Submission
from kerasltisubmission.provider import AnyIDType as _AnyIDType
from kerasltisubmission.provider import LTIProvider as _LTIProvider
from kerasltisubmission.provider import PredictionsType as _PredictionsType
AnyIDType = _AnyIDType
LTIProvider = _LTIProvider
PredictionsType = _PredictionsType
Submission = _Submission
| 30.833333 | 75 | 0.818018 | # -*- coding: utf-8 -*-
"""Top-level package for kerasltisubmission."""
__author__ = """into-ai"""
__email__ = "introintoai@gmail.com"
__version__ = "0.4.9"
from kerasltisubmission.kerasltisubmission import Submission as _Submission
from kerasltisubmission.provider import AnyIDType as _AnyIDType
from kerasltisubmission.provider import LTIProvider as _LTIProvider
from kerasltisubmission.provider import PredictionsType as _PredictionsType
AnyIDType = _AnyIDType
LTIProvider = _LTIProvider
PredictionsType = _PredictionsType
Submission = _Submission
| 0 | 0 | 0 |
58945ac4ac01c1ece620522fc52472564757cd8b | 896 | py | Python | pypro/videos/testes/test_video.py | limberger/curso-django | 9b099a9934871c221be2018d2e80331e90bee40f | [
"Apache-2.0"
] | null | null | null | pypro/videos/testes/test_video.py | limberger/curso-django | 9b099a9934871c221be2018d2e80331e90bee40f | [
"Apache-2.0"
] | 1,012 | 2020-06-22T21:43:39.000Z | 2022-03-31T22:09:32.000Z | pypro/videos/testes/test_video.py | limberger/curso-django | 9b099a9934871c221be2018d2e80331e90bee40f | [
"Apache-2.0"
] | null | null | null | import pytest
from django.urls import reverse
from model_mommy import mommy
from pypro.django_assertions import assert_contains
from pypro.videos.models import Video
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 23.578947 | 90 | 0.774554 | import pytest
from django.urls import reverse
from model_mommy import mommy
from pypro.django_assertions import assert_contains
from pypro.videos.models import Video
@pytest.fixture
def video(db):
return mommy.make(Video)
@pytest.fixture
def resp(client, video):
return client.get(reverse('videos:video', args=(video.slug,)))
@pytest.fixture
def resp_video_nao_encontrado(client, video):
return client.get(reverse('videos:video', args=(video.slug + 'video_nao_existente',)))
def test_status_code_video_nao_encontrado(resp_video_nao_encontrado):
assert resp_video_nao_encontrado.status_code == 404
def test_status_code(resp):
assert resp.status_code == 200
def test_titulo_video(resp, video):
assert_contains(resp, video.titulo)
def test_conteudo_video(resp, video):
assert_contains(resp, f'<iframe src="https://player.vimeo.com/video/{video.vimeo_id}')
| 513 | 0 | 158 |
ed47fc21da8b4daa82b109334f67b2b74a4d033b | 13,928 | py | Python | ivy/functional/backends/jax/elementwise.py | Archymade/ivy | d44bf4e9607fa2e484b8c37e65d6ad55dcc199b2 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/jax/elementwise.py | Archymade/ivy | d44bf4e9607fa2e484b8c37e65d6ad55dcc199b2 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/jax/elementwise.py | Archymade/ivy | d44bf4e9607fa2e484b8c37e65d6ad55dcc199b2 | [
"Apache-2.0"
] | null | null | null | # global
import jax
import jax.numpy as jnp
from typing import Optional
# local
import ivy
from ivy.functional.backends.jax import JaxArray
# Extra #
# ------#
| 23.408403 | 61 | 0.569213 | # global
import jax
import jax.numpy as jnp
from typing import Optional
# local
import ivy
from ivy.functional.backends.jax import JaxArray
def bitwise_left_shift(x1: JaxArray,
                       x2: JaxArray,
                       out: Optional[JaxArray] = None)\
        -> JaxArray:
    """Shift the bits of ``x1`` left by ``x2``, element-wise.

    An integer ``x2`` is promoted to an array of ``x1``'s dtype first.
    Fix: the original accepted ``out`` but silently ignored it; every
    other function in this module honours ``out`` via ivy.inplace_update,
    so this now does too.
    """
    if isinstance(x2, int):
        x2 = jnp.asarray(x2, dtype=x1.dtype)
    ret = jnp.left_shift(x1, x2)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def add(x1: JaxArray,
        x2: JaxArray,
        out: Optional[JaxArray] = None)\
        -> JaxArray:
    """Element-wise sum of ``x1`` and ``x2``; writes into ``out`` when given."""
    result = jnp.add(x1, x2)
    if not ivy.exists(out):
        return result
    return ivy.inplace_update(out, result)
def bitwise_xor(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.bitwise_xor(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def exp(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.exp(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def expm1(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.expm1(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_invert(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.bitwise_not(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_and(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.bitwise_and(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def ceil(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.ceil(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def floor(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.floor(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def isfinite(x: JaxArray,
             out: Optional[JaxArray] = None)\
        -> JaxArray:
    """Element-wise test for finiteness (neither infinity nor NaN).

    ``out`` is new, optional and defaults to None, so existing callers are
    unaffected; it is honoured the same way as in every other function in
    this module.
    """
    ret = jnp.isfinite(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def asin(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arcsin(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def isinf(x: JaxArray,
          out: Optional[JaxArray] = None)\
        -> JaxArray:
    """Element-wise test for positive or negative infinity.

    ``out`` is new, optional and defaults to None, so existing callers are
    unaffected; it is honoured the same way as in every other function in
    this module.
    """
    ret = jnp.isinf(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def greater(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) \
-> JaxArray:
ret = jnp.greater(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def greater_equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.greater_equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def less_equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.less_equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def asinh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arcsinh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sign(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sign(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sqrt(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sqrt(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def cosh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.cosh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log10(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log10(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log2(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log2(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log1p(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log1p(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def multiply(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.multiply(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def isnan(x: JaxArray,
          out: Optional[JaxArray] = None)\
        -> JaxArray:
    """Element-wise test for NaN.

    ``out`` is new, optional and defaults to None, so existing callers are
    unaffected; it is honoured the same way as in every other function in
    this module.
    """
    ret = jnp.isnan(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def less(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.less(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def cos(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.cos(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_xor(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_xor(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_or(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_or(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_and(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_and(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_not(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_not(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def divide(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.divide(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def acos(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arccos(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def acosh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arccosh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sin(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sin(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def negative(x: JaxArray,
out: Optional[JaxArray] = None) -> JaxArray:
ret = jnp.negative(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def not_equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) \
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.not_equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def tanh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.tanh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def floor_divide(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.floor_divide(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_or(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) -> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.bitwise_or(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sinh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sinh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def positive(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.positive(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def square(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.square(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def pow(x1: jnp.ndarray,
x2: jnp.ndarray,
out: Optional[JaxArray] = None)\
-> jnp.ndarray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
ret = jnp.power(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def remainder(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.remainder(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def round(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.round(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def trunc(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.trunc(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def abs(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.absolute(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def subtract(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
ret = jnp.subtract(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logaddexp(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logaddexp(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_right_shift(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.right_shift(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def tan(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.tan(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def atan(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arctan(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def atanh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arctanh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def atan2(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) -> JaxArray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
ret = jnp.arctan2(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
# Extra #
# ------#
def minimum(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.minimum(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def maximum(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.maximum(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def erf(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jax.scipy.special.erf(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
| 12,343 | 0 | 1,363 |
2d7e390998324a8dbd622b0fef4a096046599ec7 | 4,332 | py | Python | pGUIDMatching190730.py | kleblanc5909/FuzzyMatchIDs | d1c5ef37ea19f08cc30193285c9d98de0ae3d5c4 | [
"MIT"
] | null | null | null | pGUIDMatching190730.py | kleblanc5909/FuzzyMatchIDs | d1c5ef37ea19f08cc30193285c9d98de0ae3d5c4 | [
"MIT"
] | 1 | 2019-03-28T18:14:31.000Z | 2019-08-09T14:28:28.000Z | pGUIDMatching190730.py | kleblanc5909/FuzzyMatchIDs | d1c5ef37ea19f08cc30193285c9d98de0ae3d5c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: Kim LeBlanc
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
    """
    Loops over a list and returns fuzzy matches found in a second list.
    Inputs:
        list1 - list of terms to search for in the master list
        list2 - master list that is searched for matches over and over
    Outputs:
        TopMatch  - best-matching string from list2 for each member of list1
        TopScore  - fuzzywuzzy similarity score (0-100) of that match
        TopRowIdx - key/index of the match within list2
    """
    TopMatch = []
    TopScore = []
    TopRowIdx = []
    for member in list1:
        # NOTE(review): process.extractOne only returns a 3-tuple
        # (match, score, key) when the choices are dict-like (e.g. the
        # pandas Series this script passes); for a plain list it returns a
        # 2-tuple and x[2] would raise IndexError -- confirm callers.
        x=process.extractOne(member, list2)
        TopMatch.append(x[0])
        TopScore.append(x[1])
        TopRowIdx.append(x[2])
    return TopMatch, TopScore, TopRowIdx
def createRUID_List(rowIdxList, headerStr, frame=None):
    """
    Loops over a collection of row indices and returns a list of RUID strings.
    Inputs:
        rowIdxList - collection of row index values
        headerStr  - DataFrame header string value for column containing RUIDs
        frame      - optional DataFrame to read from; defaults to the
                     module-level ``df`` for backward compatibility
    Outputs:
        new list containing RUID strings
    """
    if frame is None:
        # Preserve the original behaviour of reading the module-level DataFrame.
        frame = df
    # Look the column up once instead of once per row.
    column = frame[headerStr]
    return [column.iloc[rowIdx] for rowIdx in rowIdxList]
# Load the master spreadsheet; the module-level ``df`` is also read by
# createRUID_List() above.
df = pd.read_excel("abcd_rucdr_master_forPython.xlsx")
print ('Finished reading in input file.')
#blackList=['NDAR_INV']
#for pattern in blackList:
#    df['pGUID_Rutgers'] = df['pGUID_Rutgers'].replace(pattern, '')
#datasets
# NOTE(review): the hard-coded row ranges below assume a fixed spreadsheet
# layout (mismatched IDs in rows 1949-2200, DAIC-only IDs in 1403-1947,
# Rutgers-only IDs in 0-1402) -- confirm against the current input file.
Mismatch_DAIC_IDs = df.iloc[1949:2201,0].dropna()
print (Mismatch_DAIC_IDs)
Mismatch_Rutgers_IDs = df.iloc[1949:2201,1].dropna()
print (Mismatch_Rutgers_IDs)
Unique_DAIC_IDs = df.iloc[1403:1948,0].dropna()
print (Unique_DAIC_IDs)
Unique_Rutgers_IDs = df.iloc[0:1403,1].dropna()
print (Unique_Rutgers_IDs)
AllRutgersIDs = df['rucdr.SUBCODE'].dropna()
AllDAIC_IDs = df['abcd.id_redcap'].dropna()
# Fuzzy-match each subset against the full ID list in the other system.
print ('About to start first match2collections.')
BestMatch_Mismatch_DtoR, BestScore_Mismatch_DtoR, BestRowIdx_Mismatch_DtoR = match2Lists(Mismatch_DAIC_IDs,AllRutgersIDs)
print ('Just finished first match2collections.')
print ('About to start second match2collections.')
BestMatch_Mismatch_RtoD, BestScore__Mismatch_RtoD, BestRowIdx_Mismatch_RtoD = match2Lists(Mismatch_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished second match2collections.')
print ('About to start third match2collections.')
BestMatch_Unique_DtoR, BestScore_Unique_DtoR, BestRowIdx_Unique_DtoR = match2Lists(Unique_DAIC_IDs, AllRutgersIDs)
print ('Just finished third match2collections.')
print ('About to start fourth match2collections.')
BestMatch_Unique_RtoD, BestScore_Unique_RtoD, BestRowIdx_Unique_RtoD = match2Lists(Unique_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished fourth match2collections.')
# Attach the match results as new columns on the master frame.
df['BestMatchdf_Mismatch_DtoR']=pd.Series(BestMatch_Mismatch_DtoR)
df['BestScoredf_Mismatch_DtoR']=pd.Series(BestScore_Mismatch_DtoR)
df['BestRowIdxdf_Mismatch_DtoR']=pd.Series(BestRowIdx_Mismatch_DtoR)
df['BestMatchdf_Mismatch_RtoD']=pd.Series(BestMatch_Mismatch_RtoD)
df['BestScoredf_Mismatch_RtoD']=pd.Series(BestScore__Mismatch_RtoD)
df['BestRowIdxdf_Mismatch_RtoD']=pd.Series(BestRowIdx_Mismatch_RtoD)
df['BestMatchdf_Unique_DtoR']=pd.Series(BestMatch_Unique_DtoR)
df['BestScoredf_Unique_DtoR']=pd.Series(BestScore_Unique_DtoR)
df['BestRowIdxdf_Unique_DtoR']=pd.Series(BestRowIdx_Unique_DtoR)
df['BestMatchdf_Unique_RtoD']=pd.Series(BestMatch_Unique_RtoD)
df['BestScoredf_Unique_RtoD']=pd.Series(BestScore_Unique_RtoD)
df['BestRowIdxdf_Unique_RtoD']=pd.Series(BestRowIdx_Unique_RtoD)
# Translate matched row indices into inventory codes for each result set.
InvCode_Mismatch_DtoR_List = createRUID_List(BestRowIdx_Mismatch_DtoR, 'Inventory_Code')
df['InvCode_Mismatch_DtoR']=pd.Series(InvCode_Mismatch_DtoR_List)
InvCode_Mismatch_RtoD_List = createRUID_List(BestRowIdx_Mismatch_RtoD, 'Inventory_Code')
df['InvCode_Mismatch_RtoD']=pd.Series(InvCode_Mismatch_RtoD_List)
InvCode_Unique_DtoR_List = createRUID_List(BestRowIdx_Unique_DtoR, 'Inventory_Code')
df['InvCode_Unique_DtoR']=pd.Series(InvCode_Unique_DtoR_List)
InvCode_Unique_RtoD_List = createRUID_List(BestRowIdx_Unique_RtoD, 'Inventory_Code')
df['InvCode_Unique_RtoD']=pd.Series(InvCode_Unique_RtoD_List)
# Write the augmented frame out to a new workbook.
writer = pd.ExcelWriter('FuzzyMatchedIDsOne_190730.xlsx')
df.to_excel(writer,'Sheet1')
writer.save()
| 40.867925 | 125 | 0.767082 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: Kim LeBlanc
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
    """
    Loops over a list and returns fuzzy matches found in a second list.
    Inputs:
        list1 - list of terms to search for in the master list
        list2 - master list that is searched for matches over and over
    Outputs:
        TopMatch  - best-matching string from list2 for each member of list1
        TopScore  - fuzzywuzzy similarity score (0-100) of that match
        TopRowIdx - key/index of the match within list2
    """
    TopMatch = []
    TopScore = []
    TopRowIdx = []
    for member in list1:
        # NOTE(review): process.extractOne only returns a 3-tuple
        # (match, score, key) when the choices are dict-like (e.g. the
        # pandas Series this script passes); for a plain list it returns a
        # 2-tuple and x[2] would raise IndexError -- confirm callers.
        x=process.extractOne(member, list2)
        TopMatch.append(x[0])
        TopScore.append(x[1])
        TopRowIdx.append(x[2])
    return TopMatch, TopScore, TopRowIdx
def createRUID_List(rowIdxList, headerStr, frame=None):
    """
    Loops over a collection of row indices and returns a list of RUID strings.
    Inputs:
        rowIdxList - collection of row index values
        headerStr  - DataFrame header string value for column containing RUIDs
        frame      - optional DataFrame to read from; defaults to the
                     module-level ``df`` for backward compatibility
    Outputs:
        new list containing RUID strings
    """
    if frame is None:
        # Preserve the original behaviour of reading the module-level DataFrame.
        frame = df
    # Look the column up once instead of once per row.
    column = frame[headerStr]
    return [column.iloc[rowIdx] for rowIdx in rowIdxList]
df = pd.read_excel("abcd_rucdr_master_forPython.xlsx")
print ('Finished reading in input file.')
#blackList=['NDAR_INV']
#for pattern in blackList:
# df['pGUID_Rutgers'] = df['pGUID_Rutgers'].replace(pattern, '')
#datasets
Mismatch_DAIC_IDs = df.iloc[1949:2201,0].dropna()
print (Mismatch_DAIC_IDs)
Mismatch_Rutgers_IDs = df.iloc[1949:2201,1].dropna()
print (Mismatch_Rutgers_IDs)
Unique_DAIC_IDs = df.iloc[1403:1948,0].dropna()
print (Unique_DAIC_IDs)
Unique_Rutgers_IDs = df.iloc[0:1403,1].dropna()
print (Unique_Rutgers_IDs)
AllRutgersIDs = df['rucdr.SUBCODE'].dropna()
AllDAIC_IDs = df['abcd.id_redcap'].dropna()
print ('About to start first match2collections.')
BestMatch_Mismatch_DtoR, BestScore_Mismatch_DtoR, BestRowIdx_Mismatch_DtoR = match2Lists(Mismatch_DAIC_IDs,AllRutgersIDs)
print ('Just finished first match2collections.')
print ('About to start second match2collections.')
BestMatch_Mismatch_RtoD, BestScore__Mismatch_RtoD, BestRowIdx_Mismatch_RtoD = match2Lists(Mismatch_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished second match2collections.')
print ('About to start third match2collections.')
BestMatch_Unique_DtoR, BestScore_Unique_DtoR, BestRowIdx_Unique_DtoR = match2Lists(Unique_DAIC_IDs, AllRutgersIDs)
print ('Just finished third match2collections.')
print ('About to start fourth match2collections.')
BestMatch_Unique_RtoD, BestScore_Unique_RtoD, BestRowIdx_Unique_RtoD = match2Lists(Unique_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished fourth match2collections.')
df['BestMatchdf_Mismatch_DtoR']=pd.Series(BestMatch_Mismatch_DtoR)
df['BestScoredf_Mismatch_DtoR']=pd.Series(BestScore_Mismatch_DtoR)
df['BestRowIdxdf_Mismatch_DtoR']=pd.Series(BestRowIdx_Mismatch_DtoR)
df['BestMatchdf_Mismatch_RtoD']=pd.Series(BestMatch_Mismatch_RtoD)
df['BestScoredf_Mismatch_RtoD']=pd.Series(BestScore__Mismatch_RtoD)
df['BestRowIdxdf_Mismatch_RtoD']=pd.Series(BestRowIdx_Mismatch_RtoD)
df['BestMatchdf_Unique_DtoR']=pd.Series(BestMatch_Unique_DtoR)
df['BestScoredf_Unique_DtoR']=pd.Series(BestScore_Unique_DtoR)
df['BestRowIdxdf_Unique_DtoR']=pd.Series(BestRowIdx_Unique_DtoR)
df['BestMatchdf_Unique_RtoD']=pd.Series(BestMatch_Unique_RtoD)
df['BestScoredf_Unique_RtoD']=pd.Series(BestScore_Unique_RtoD)
df['BestRowIdxdf_Unique_RtoD']=pd.Series(BestRowIdx_Unique_RtoD)
InvCode_Mismatch_DtoR_List = createRUID_List(BestRowIdx_Mismatch_DtoR, 'Inventory_Code')
df['InvCode_Mismatch_DtoR']=pd.Series(InvCode_Mismatch_DtoR_List)
InvCode_Mismatch_RtoD_List = createRUID_List(BestRowIdx_Mismatch_RtoD, 'Inventory_Code')
df['InvCode_Mismatch_RtoD']=pd.Series(InvCode_Mismatch_RtoD_List)
InvCode_Unique_DtoR_List = createRUID_List(BestRowIdx_Unique_DtoR, 'Inventory_Code')
df['InvCode_Unique_DtoR']=pd.Series(InvCode_Unique_DtoR_List)
InvCode_Unique_RtoD_List = createRUID_List(BestRowIdx_Unique_RtoD, 'Inventory_Code')
df['InvCode_Unique_RtoD']=pd.Series(InvCode_Unique_RtoD_List)
writer = pd.ExcelWriter('FuzzyMatchedIDsOne_190730.xlsx')
df.to_excel(writer,'Sheet1')
writer.save()
| 0 | 0 | 0 |
4221ea9fa895a14d48832e0dee725162e4076885 | 7,951 | py | Python | models/deformable_modules.py | kylevedder/mvits_for_class_agnostic_od | 39558fa2478ce22988a9451647e031a3f51f78df | [
"MIT"
] | 114 | 2021-11-21T15:02:54.000Z | 2022-03-25T20:18:33.000Z | models/deformable_modules.py | kylevedder/mvits_for_class_agnostic_od | 39558fa2478ce22988a9451647e031a3f51f78df | [
"MIT"
] | 6 | 2021-11-27T15:15:51.000Z | 2022-03-24T18:23:13.000Z | models/deformable_modules.py | kylevedder/mvits_for_class_agnostic_od | 39558fa2478ce22988a9451647e031a3f51f78df | [
"MIT"
] | 11 | 2021-11-23T08:56:50.000Z | 2022-02-24T09:59:16.000Z | import copy
import torch
import torch.nn.functional as F
from torch import nn
from models.util import inverse_sigmoid
from models.ops.modules import MSDeformAttn
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 39.755 | 118 | 0.633002 | import copy
import torch
import torch.nn.functional as F
from torch import nn
from models.util import inverse_sigmoid
from models.ops.modules import MSDeformAttn
class DeformableTransformerEncoderLayer(nn.Module):
    """Single Deformable DETR encoder layer.

    Applies multi-scale deformable self-attention over the flattened
    feature maps, then a position-wise feed-forward network; each sub-block
    is followed by dropout, a residual connection and layer normalisation.
    """
    def __init__(self,
                 d_model=256, d_ffn=1024,
                 dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        # d_model: embedding width; d_ffn: FFN hidden width;
        # n_levels/n_heads/n_points: deformable-attention configuration.
        super().__init__()
        # self attention
        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)
    @staticmethod
    def with_pos_embed(tensor, pos):
        # Positional encodings are added (not concatenated); ``pos`` may be None.
        return tensor if pos is None else tensor + pos
    def forward_ffn(self, src):
        """Feed-forward sub-block: linear -> activation -> linear, then residual + norm."""
        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
        src = src + self.dropout3(src2)
        src = self.norm2(src)
        return src
    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
        """Run deformable self-attention, then the FFN, over ``src``.

        NOTE(review): ``src`` is presumably (batch, sum of H*W over levels,
        d_model) as produced by the surrounding encoder -- confirm with callers.
        """
        # self attention
        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index,
                              padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # ffn
        src = self.forward_ffn(src)
        return src
class DeformableTransformerEncoder(nn.Module):
    """Stack of ``num_layers`` deformable encoder layers sharing one reference-point grid."""
    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        # _get_clones (defined elsewhere in this file) copies the layer so
        # each depth has its own weights.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Build normalised (x, y) reference points for every pixel of every level.

        Points sit at pixel centres (the 0.5 offsets) and are divided by the
        valid (un-padded) extent of each feature map so they live in [0, 1].
        """
        reference_points_list = []
        for lvl, (H_, W_) in enumerate(spatial_shapes):
            ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
                                          torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
            ref = torch.stack((ref_x, ref_y), -1)
            reference_points_list.append(ref)
        reference_points = torch.cat(reference_points_list, 1)
        # Broadcast against the per-level valid ratios so every level sees
        # every point, rescaled for its own padding.
        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
        return reference_points
    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
        """Apply each encoder layer in sequence over the flattened multi-level features."""
        output = src
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
        for _, layer in enumerate(self.layers):
            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
        return output
class DeformableTransformerDecoderLayer(nn.Module):
    """Single Deformable DETR decoder layer.

    Order of sub-blocks in forward(): query self-attention, deformable
    cross-attention into the encoder memory, then a feed-forward network;
    each is followed by dropout, a residual connection and layer norm.
    """
    def __init__(self, d_model=256, d_ffn=1024,
                 dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        super().__init__()
        # cross attention
        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # self attention
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)
        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout3 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout4 = nn.Dropout(dropout)
        self.norm3 = nn.LayerNorm(d_model)
    @staticmethod
    def with_pos_embed(tensor, pos):
        # Positional encodings are added (not concatenated); ``pos`` may be None.
        return tensor if pos is None else tensor + pos
    def forward_ffn(self, tgt):
        """Feed-forward sub-block: linear -> activation -> linear, then residual + norm."""
        tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout4(tgt2)
        tgt = self.norm3(tgt)
        return tgt
    def forward(self, tgt, query_pos, reference_points, src, src_spatial_shapes, level_start_index,
                src_padding_mask=None):
        """Update the object queries ``tgt`` against the encoder memory ``src``."""
        # self attention
        q = k = self.with_pos_embed(tgt, query_pos)
        # nn.MultiheadAttention expects (seq, batch, dim), hence the transposes.
        tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # cross attention
        tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos),
                               reference_points,
                               src, src_spatial_shapes, level_start_index, src_padding_mask)
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # ffn
        tgt = self.forward_ffn(tgt)
        return tgt
class DeformableTransformerDecoder(nn.Module):
    """Stack of decoder layers with optional iterative bounding-box refinement."""
    def __init__(self, decoder_layer, num_layers, return_intermediate=False):
        """Clone `decoder_layer`; `return_intermediate` keeps per-layer outputs for aux losses."""
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.return_intermediate = return_intermediate
        # hack implementation for iterative bounding box refinement and two-stage Deformable DETR
        # (these stay None unless an outer model assigns per-layer heads to them)
        self.bbox_embed = None
        self.class_embed = None
    def forward(self, tgt, reference_points, src, src_spatial_shapes, src_level_start_index, src_valid_ratios,
                query_pos=None, src_padding_mask=None):
        """Decode `tgt` through all layers.

        `reference_points` may be 2-d points (last dim == 2) or full boxes
        (last dim == 4); both branches below scale them by `src_valid_ratios`
        before each layer. Returns (output, reference_points), or stacked
        per-layer versions of both when `return_intermediate` is set.
        """
        output = tgt
        intermediate = []
        intermediate_reference_points = []
        for lid, layer in enumerate(self.layers):
            if reference_points.shape[-1] == 4:
                # Boxes: scale (cx, cy, w, h) by the (w, h) valid ratios twice.
                reference_points_input = reference_points[:, :, None] \
                                         * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]
            else:
                assert reference_points.shape[-1] == 2
                reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]
            output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index,
                           src_padding_mask)
            # hack implementation for iterative bounding box refinement
            if self.bbox_embed is not None:
                tmp = self.bbox_embed[lid](output)
                if reference_points.shape[-1] == 4:
                    # Refine in logit space, then squash back to (0, 1).
                    new_reference_points = tmp + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                else:
                    assert reference_points.shape[-1] == 2
                    # Only the first two channels (the center) get the residual update.
                    new_reference_points = tmp
                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                # detach() blocks gradients from flowing through the refined points.
                reference_points = new_reference_points.detach()
            if self.return_intermediate:
                intermediate.append(output)
                intermediate_reference_points.append(reference_points)
        if self.return_intermediate:
            return torch.stack(intermediate), torch.stack(intermediate_reference_points)
        return output, reference_points
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 6,836 | 458 | 168 |
18a68efefd06ff91dc65efb8330e3e1bfacccf5e | 1,590 | py | Python | scripts/eu4/rulers.py | ajul/pyradox | 757f7f66fcc9474281942e6c1352b80f9f16cc98 | [
"MIT"
] | 20 | 2015-03-17T22:40:28.000Z | 2022-01-24T19:44:17.000Z | scripts/eu4/rulers.py | ajul/pyradox | 757f7f66fcc9474281942e6c1352b80f9f16cc98 | [
"MIT"
] | 21 | 2015-02-06T06:42:14.000Z | 2022-01-03T23:23:17.000Z | scripts/eu4/rulers.py | ajul/pyradox | 757f7f66fcc9474281942e6c1352b80f9f16cc98 | [
"MIT"
] | 10 | 2017-04-05T19:46:18.000Z | 2022-03-16T10:57:37.000Z | import _initpath
import os
import pyradox
import load.country
import load.province
# Load countries and provinces.
countries = load.country.get_countries()
# Leader stat keys copied from each ruler's 'leader' sub-record
# (note: 'manuever' is misspelled consistently with the data keys used below).
leader_keys = ('fire', 'shock', 'manuever', 'siege')
# Build a MediaWiki sortable table, one row per monarch.
s = '{|class = "wikitable sortable"\n'
s += "! Leader !! Country !! Date !! {{icon|adm}} !! {{icon|dip}} !! {{icon|mil}} !! Total !! {{icon|leader fire}} !! {{icon|leader shock}} !! {{icon|leader maneuver}} !! {{icon|leader siege}} \n"
for tag, country in countries.items():
    country_name = load.country.get_country_name(tag)
    if country_name is None: print('Missing localisation: ' + tag)
    for date, data in country.items():
        # Only date-keyed history entries carry monarch records.
        if not isinstance(date, pyradox.Date): continue
        for ruler in data.find_all('monarch'):
            if "leader" in ruler:
                # Flatten the leader sub-record into top-level string fields.
                for key in leader_keys:
                    ruler[key] = str(ruler['leader'][key])
            else:
                for key in leader_keys:
                    ruler[key] = ''
            if 'regent' in ruler and ruler['regent']: ruler['name'] += ' (regent)'
            # broken file
            if not isinstance(ruler['mil'], int): ruler['mil'] = 0
            ruler['total'] = ruler['adm'] + ruler['dip'] + ruler['mil']
            ruler["country"] = country_name
            ruler["date"] = date
            # output_row is defined elsewhere in this module.
            s += output_row(ruler)
s += '|}\n'
print(s)
| 33.829787 | 196 | 0.557233 | import _initpath
import os
import pyradox
import load.country
import load.province
def output_row(data):
    """Render one ruler record as a MediaWiki table row.

    *data* is any mapping providing the keys named in the template below
    ('manuever' spelling matches the data keys used by the caller).
    """
    row_template = (
        "|-\n"
        "| %(name)s || %(country)s || %(date)s || %(adm)s || %(dip)s"
        " || %(mil)s || %(total)s || %(fire)s || %(shock)s"
        " || %(manuever)s || %(siege)s \n"
    )
    return row_template % data
# Load countries and provinces.
countries = load.country.get_countries()
# Leader stat keys; 'manuever' spelling matches the data keys used below.
leader_keys = ('fire', 'shock', 'manuever', 'siege')
# Assemble a MediaWiki sortable table, one row per monarch.
s = '{|class = "wikitable sortable"\n'
s += "! Leader !! Country !! Date !! {{icon|adm}} !! {{icon|dip}} !! {{icon|mil}} !! Total !! {{icon|leader fire}} !! {{icon|leader shock}} !! {{icon|leader maneuver}} !! {{icon|leader siege}} \n"
for tag, country in countries.items():
    country_name = load.country.get_country_name(tag)
    if country_name is None: print('Missing localisation: ' + tag)
    for date, data in country.items():
        # Only date-keyed history entries carry monarch records.
        if not isinstance(date, pyradox.Date): continue
        for ruler in data.find_all('monarch'):
            if "leader" in ruler:
                # Flatten the leader sub-record into top-level string fields.
                for key in leader_keys:
                    ruler[key] = str(ruler['leader'][key])
            else:
                for key in leader_keys:
                    ruler[key] = ''
            if 'regent' in ruler and ruler['regent']: ruler['name'] += ' (regent)'
            # broken file
            if not isinstance(ruler['mil'], int): ruler['mil'] = 0
            ruler['total'] = ruler['adm'] + ruler['dip'] + ruler['mil']
            ruler["country"] = country_name
            ruler["date"] = date
            s += output_row(ruler)
s += '|}\n'
print(s)
| 202 | 0 | 23 |
1bca6094cad4a7132248971826177a3815318908 | 45 | py | Python | src/about/__init__.py | anmquangw/viu-upload-file | bfbff413cc92e454226fced5fe504b7cebc6c102 | [
"MIT"
] | null | null | null | src/about/__init__.py | anmquangw/viu-upload-file | bfbff413cc92e454226fced5fe504b7cebc6c102 | [
"MIT"
] | null | null | null | src/about/__init__.py | anmquangw/viu-upload-file | bfbff413cc92e454226fced5fe504b7cebc6c102 | [
"MIT"
] | null | null | null | default_app_config = 'about.apps.AboutConfig' | 45 | 45 | 0.844444 | default_app_config = 'about.apps.AboutConfig' | 0 | 0 | 0 |
42a5696b4c285f14f0652982e0cb721acefdfe48 | 934 | py | Python | TestForms/testForms.py | icyplayer/ConcreteMaths | 8df7d37008689e272482740a77c652a9bf36b650 | [
"MIT"
] | null | null | null | TestForms/testForms.py | icyplayer/ConcreteMaths | 8df7d37008689e272482740a77c652a9bf36b650 | [
"MIT"
] | null | null | null | TestForms/testForms.py | icyplayer/ConcreteMaths | 8df7d37008689e272482740a77c652a9bf36b650 | [
"MIT"
] | null | null | null | from forms.forms import Form
# Column vectors used by the transposed-form tests.
vecLst = [[1, 0, 2, 0], # (x0, y0, z0)
          [0, 1, 2, 0], # (x1, y1, z1)
          [2, 0, 1, 0],] # (x2, y2, z2)
# validation test
def test1():
    """ Simple test """
    # Exercise Form's string rendering at several dimensions.
    dim = 2
    form = Form(vecLst, dim)
    print(form)
    # X, Y, Z format
    print()
    form.dim = 3
    print(form)
    # X%02d format
    print()
    form.dim = 4
    print(form)
    # self defined format
    print()
    form.dim = 4
    print(form.genFromWithHeader(["X", "Y", "Z", "W"]))
# print("test 1")
# test1()
print("test 2")
# test non-transpose case
vecLstNonTrans = [[1,2,3],
                  [0,0,1],
                  [0,1,0],]
dim = 2
# Third argument False presumably disables transposition — confirm in forms.forms.
formNonTrans = Form(vecLstNonTrans, dim, False)
print("form (non-transposed): dim=%d" % formNonTrans.getDim())
print(formNonTrans)
print()
formNonTrans.setDim(3)
print("form (non-transposed): dim=%d" % formNonTrans.getDim())
print(formNonTrans)
| 17.961538 | 62 | 0.536403 | from forms.forms import Form
# Column vectors used by the transposed-form tests.
vecLst = [[1, 0, 2, 0], # (x0, y0, z0)
          [0, 1, 2, 0], # (x1, y1, z1)
          [2, 0, 1, 0],] # (x2, y2, z2)
# validation test
def test1():
    """ Simple test """
    # Exercise Form's string rendering at several dimensions.
    dim = 2
    form = Form(vecLst, dim)
    print(form)
    # X, Y, Z format
    print()
    form.dim = 3
    print(form)
    # X%02d format
    print()
    form.dim = 4
    print(form)
    # self defined format
    print()
    form.dim = 4
    print(form.genFromWithHeader(["X", "Y", "Z", "W"]))
# print("test 1")
# test1()
print("test 2")
# test non-transpose case
vecLstNonTrans = [[1,2,3],
                  [0,0,1],
                  [0,1,0],]
dim = 2
# Third argument False presumably disables transposition — confirm in forms.forms.
formNonTrans = Form(vecLstNonTrans, dim, False)
print("form (non-transposed): dim=%d" % formNonTrans.getDim())
print(formNonTrans)
print()
formNonTrans.setDim(3)
print("form (non-transposed): dim=%d" % formNonTrans.getDim())
print(formNonTrans)
| 0 | 0 | 0 |
6b3ee4c11e11b33c84586221e5c5bd4da14ca91b | 182 | py | Python | Chapter10/study10_7.py | nicuo/Python3 | 4cee68030eab62eca92c0a72d2a3d79d92764072 | [
"CNRI-Python"
] | null | null | null | Chapter10/study10_7.py | nicuo/Python3 | 4cee68030eab62eca92c0a72d2a3d79d92764072 | [
"CNRI-Python"
] | null | null | null | Chapter10/study10_7.py | nicuo/Python3 | 4cee68030eab62eca92c0a72d2a3d79d92764072 | [
"CNRI-Python"
] | null | null | null | from datetime import date
from datetime import timedelta

# Reference day for this exercise (a Tuesday, so weekday() prints 1).
my_day = date(2017, 8, 1)
print(my_day)
print(my_day.weekday())
# The calendar date exactly 10,000 days after the reference day.
party_day = my_day + timedelta(days=10000)
print(party_day)
| 20.222222 | 42 | 0.774725 | from datetime import date
from datetime import timedelta
# Reference day for this exercise; weekday() is 0 for Monday.
my_day = date(2017,8,1)
print (my_day)
print (my_day.weekday())
# Date arithmetic: the day exactly 10,000 days later.
party_day = my_day + timedelta(days=10000)
print(party_day)
| 0 | 0 | 0 |
1a5502f6015145ed446dd94907b8122c551f716a | 3,710 | py | Python | calculate_ppl.py | wyshi/lm_privacy | ffce153a4dc19d9f61d3e288483f05b409fddda5 | [
"MIT"
] | 6 | 2021-09-09T01:40:13.000Z | 2022-03-10T20:46:36.000Z | calculate_ppl.py | wyshi/lm_privacy | ffce153a4dc19d9f61d3e288483f05b409fddda5 | [
"MIT"
] | null | null | null | calculate_ppl.py | wyshi/lm_privacy | ffce153a4dc19d9f61d3e288483f05b409fddda5 | [
"MIT"
] | null | null | null | ###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
'''
cuda:0
ppl: 16.847383872958442 for sentence My SSN is 341752., 0.0031911754608154297 seconds
cpu
ppl: 16.847387889688246 for sentence My SSN is 341752., 0.00565678596496582 seconds
python calculate_ppl.py --checkpoint model/nodp/20210408/223716/data-wikitext-2-add10b__model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-50258__bs-256__bptt-35__lr-20.0__dp-False_partial-False.pt
'''
import argparse
import torch
import torch.nn as nn
import math
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2TokenizerFast
import utils
import time
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
    # Model parameters.
    # parser.add_argument('--data', type=str, default='./data/wikitext-2/',
    #                     help='location of the data corpus')
    parser.add_argument('--checkpoint', type=str, default='/home/wyshi/privacy/model/nodp/model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-33278__bs-256__bptt-35__lr-20.0__dp-False.pt',
                        help='model checkpoint to use')
    # parser.add_argument('--outf', type=str, default='generated.txt',
    #                     help='output file for generated text')
    # parser.add_argument('--words', type=int, default='1000',
    #                     help='number of words to generate')
    parser.add_argument('--seed', type=int, default=1111,
                        help='random seed')
    parser.add_argument('--cuda', type=str, default="cuda:0",
                        help='use CUDA')
    parser.add_argument('--data_type', type=str.lower, default='doc', choices=['doc', 'dial'],
                        help='data type, doc for documents in lm, dial for dialogues')
    args = parser.parse_args()
    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        if not args.cuda:
            print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    device = torch.device(args.cuda)
    ###############################################################################
    # Load model
    ###############################################################################
    # The checkpoint is a fully pickled model object, mapped onto `device`.
    with open(args.checkpoint, 'rb') as f:
        model = torch.load(f, map_location=device)
    model.eval()
    ###############################################################################
    # Load tokenizer
    ###############################################################################
    is_dial = args.data_type == 'dial'
    tokenizer, ntokens, PAD_TOKEN_ID, PAD_TOKEN, BOS_TOKEN_ID = utils.load_tokenizer(is_dialog=is_dial)
    # Transformer checkpoints tag themselves via a `model_type` attribute.
    is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
    # Probe sentences with SSN-like digits (per the module header, used to
    # measure per-sentence perplexity for this privacy experiment).
    sentence = [" My SSN is 341752.", " My SSN is 123456.", " My SSN is 341753."]
    tokenized_sent = [tokenizer.encode(s) for s in sentence]
    t1 = time.time()
    # 100 repetitions to time the adjusted-ppl computation per sample.
    for _ in range(100):
        # import pdb; pdb.set_trace()
        # ppl = utils.calculate_ppl(tokenized_sent, model, device, PAD_TOKEN_ID, is_transformer_model=is_transformer_model)
        ppl = utils.calculate_adjusted_ppl_acc(tokenized_sent, model, device, PAD_TOKEN_ID, tokenizer, utils.is_digit, is_transformer_model=is_transformer_model)
    t2 = time.time()
print(f"ppl: {ppl} for sentence {sentence}, {(t2-t1)/100/len(tokenized_sent)} seconds/sample") | 43.139535 | 217 | 0.596496 | ###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
'''
cuda:0
ppl: 16.847383872958442 for sentence My SSN is 341752., 0.0031911754608154297 seconds
cpu
ppl: 16.847387889688246 for sentence My SSN is 341752., 0.00565678596496582 seconds
python calculate_ppl.py --checkpoint model/nodp/20210408/223716/data-wikitext-2-add10b__model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-50258__bs-256__bptt-35__lr-20.0__dp-False_partial-False.pt
'''
import argparse
import torch
import torch.nn as nn
import math
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2TokenizerFast
import utils
import time
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
    # Model parameters.
    # parser.add_argument('--data', type=str, default='./data/wikitext-2/',
    #                     help='location of the data corpus')
    parser.add_argument('--checkpoint', type=str, default='/home/wyshi/privacy/model/nodp/model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-33278__bs-256__bptt-35__lr-20.0__dp-False.pt',
                        help='model checkpoint to use')
    # parser.add_argument('--outf', type=str, default='generated.txt',
    #                     help='output file for generated text')
    # parser.add_argument('--words', type=int, default='1000',
    #                     help='number of words to generate')
    parser.add_argument('--seed', type=int, default=1111,
                        help='random seed')
    parser.add_argument('--cuda', type=str, default="cuda:0",
                        help='use CUDA')
    parser.add_argument('--data_type', type=str.lower, default='doc', choices=['doc', 'dial'],
                        help='data type, doc for documents in lm, dial for dialogues')
    args = parser.parse_args()
    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        if not args.cuda:
            print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    device = torch.device(args.cuda)
    ###############################################################################
    # Load model
    ###############################################################################
    # The checkpoint is a fully pickled model object, mapped onto `device`.
    with open(args.checkpoint, 'rb') as f:
        model = torch.load(f, map_location=device)
    model.eval()
    ###############################################################################
    # Load tokenizer
    ###############################################################################
    is_dial = args.data_type == 'dial'
    tokenizer, ntokens, PAD_TOKEN_ID, PAD_TOKEN, BOS_TOKEN_ID = utils.load_tokenizer(is_dialog=is_dial)
    # Transformer checkpoints tag themselves via a `model_type` attribute.
    is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
    # Probe sentences with SSN-like digits (per the module header, used to
    # measure per-sentence perplexity for this privacy experiment).
    sentence = [" My SSN is 341752.", " My SSN is 123456.", " My SSN is 341753."]
    tokenized_sent = [tokenizer.encode(s) for s in sentence]
    t1 = time.time()
    # 100 repetitions to time the adjusted-ppl computation per sample.
    for _ in range(100):
        # import pdb; pdb.set_trace()
        # ppl = utils.calculate_ppl(tokenized_sent, model, device, PAD_TOKEN_ID, is_transformer_model=is_transformer_model)
        ppl = utils.calculate_adjusted_ppl_acc(tokenized_sent, model, device, PAD_TOKEN_ID, tokenizer, utils.is_digit, is_transformer_model=is_transformer_model)
    t2 = time.time()
print(f"ppl: {ppl} for sentence {sentence}, {(t2-t1)/100/len(tokenized_sent)} seconds/sample") | 0 | 0 | 0 |
be114b8233b61c99e356bca8850dbc4cf22e319a | 683 | py | Python | game/displays/asci_display.py | scooler/tic-tac-toe | 8ec14c0c35dd48edc8718b478e5f4c83891a941a | [
"MIT"
] | null | null | null | game/displays/asci_display.py | scooler/tic-tac-toe | 8ec14c0c35dd48edc8718b478e5f4c83891a941a | [
"MIT"
] | null | null | null | game/displays/asci_display.py | scooler/tic-tac-toe | 8ec14c0c35dd48edc8718b478e5f4c83891a941a | [
"MIT"
] | null | null | null | # from board import Board
import os
| 20.088235 | 43 | 0.522694 | # from board import Board
import os
class ASCIDisplay:
    """Render a tic-tac-toe board as plain ASCII on stdout.

    The wrapped *board* object is expected to expose ``x_size``, ``y_size``,
    a ``result`` attribute, and a ``board`` indexable by ``(row, col)`` tuples.
    """

    def __init__(self, board):
        self.board = board

    def show_results(self):
        """Print the board's result attribute verbatim."""
        print(self.board.result)

    def draw(self):
        """Clear the terminal, then repaint the whole board."""
        os.system('clear')
        self.draw_board()

    def draw_board(self):
        """Print each row as '|'-separated cells, with a dashed rule between rows."""
        symbols = {0: ' ', 1: 'X', 2: 'O'}  # unknown cell values render as nothing
        for row in range(self.board.x_size):
            cells = [symbols.get(self.board.board[row, col], '')
                     for col in range(self.board.y_size)]
            print('|'.join(cells))
            if row < self.board.x_size - 1:
                print("------")
| 526 | -3 | 122 |
fe71ae0f77d8bd1deef11eba95a9561c13081cf0 | 1,010 | py | Python | datastore/shared/postgresql_backend/__init__.py | jsangmeister/openslides-datastore-service | 7170f008ccac0b31c37ffeee083b972bc314660d | [
"MIT"
] | 2 | 2020-01-20T13:56:28.000Z | 2020-02-17T10:56:26.000Z | datastore/shared/postgresql_backend/__init__.py | jsangmeister/openslides-datastore-service | 7170f008ccac0b31c37ffeee083b972bc314660d | [
"MIT"
] | 122 | 2020-01-16T15:13:37.000Z | 2022-03-17T10:32:47.000Z | datastore/shared/postgresql_backend/__init__.py | jsangmeister/openslides-datastore-service | 7170f008ccac0b31c37ffeee083b972bc314660d | [
"MIT"
] | 7 | 2020-02-20T12:04:17.000Z | 2021-11-23T17:54:33.000Z | from .apply_list_updates import ListUpdatesDict, apply_fields # noqa
from .connection_handler import ConnectionHandler, DatabaseError # noqa
from .pg_connection_handler import retry_on_db_failure # noqa
from .sql_event_types import EVENT_TYPES # noqa
from .sql_query_helper import SqlQueryHelper
# Names of every table this backend knows about; the 'migration_*' entries
# presumably belong to migration bookkeeping — confirm against the schema.
ALL_TABLES = (
    "positions",
    "events",
    "id_sequences",
    "collectionfields",
    "events_to_collectionfields",
    "models",
    "migration_keyframes",
    "migration_keyframe_models",
    "migration_events",
    "migration_positions",
)
| 31.5625 | 80 | 0.791089 | from .apply_list_updates import ListUpdatesDict, apply_fields # noqa
from .connection_handler import ConnectionHandler, DatabaseError # noqa
from .pg_connection_handler import retry_on_db_failure # noqa
from .sql_event_types import EVENT_TYPES # noqa
from .sql_query_helper import SqlQueryHelper
# Names of every table this backend knows about; the 'migration_*' entries
# presumably belong to migration bookkeeping — confirm against the schema.
ALL_TABLES = (
    "positions",
    "events",
    "id_sequences",
    "collectionfields",
    "events_to_collectionfields",
    "models",
    "migration_keyframes",
    "migration_keyframe_models",
    "migration_events",
    "migration_positions",
)
def setup_di():
    """Register the PostgreSQL service implementations with the DI injector.

    Imports are local, presumably to avoid circular imports at module load
    time — confirm before hoisting them to the top of the file.
    """
    from datastore.shared.di import injector
    from datastore.shared.services import ReadDatabase
    from .pg_connection_handler import PgConnectionHandlerService
    from .sql_read_database_backend_service import SqlReadDatabaseBackendService
    injector.register(ConnectionHandler, PgConnectionHandlerService)
    # SqlQueryHelper is registered as its own implementation (no interface split).
    injector.register(SqlQueryHelper, SqlQueryHelper)
    injector.register(ReadDatabase, SqlReadDatabaseBackendService)
| 433 | 0 | 23 |
71bc94489b9c4c04a2b93e235714a12a53fd731e | 247 | py | Python | charge/api/urls.py | andreclimaco/desafio_iclinic | d7aeff23144ea678148fece774a124d200474b86 | [
"BSD-3-Clause"
] | null | null | null | charge/api/urls.py | andreclimaco/desafio_iclinic | d7aeff23144ea678148fece774a124d200474b86 | [
"BSD-3-Clause"
] | null | null | null | charge/api/urls.py | andreclimaco/desafio_iclinic | d7aeff23144ea678148fece774a124d200474b86 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import ChargesViewSet
# DRF router: generates the list/detail routes for ChargesViewSet under 'charges/'.
router = DefaultRouter()
router.register(r'charges', ChargesViewSet)
urlpatterns = [
    path('', include(router.urls)),
]
| 20.583333 | 48 | 0.773279 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import ChargesViewSet
# DRF router: generates the list/detail routes for ChargesViewSet under 'charges/'.
router = DefaultRouter()
router.register(r'charges', ChargesViewSet)
urlpatterns = [
    path('', include(router.urls)),
]
| 0 | 0 | 0 |
21c76be4aecdf872c5fe135c74d7c423e43c1996 | 2,033 | py | Python | server/onesphere/onesphere_mdm/models/maintenance.py | masami10/onesphere | 763a5f15b374e24b1c89aba530a22b000aff1aa5 | [
"MIT"
] | 2 | 2022-01-19T06:36:55.000Z | 2022-01-26T05:53:20.000Z | server/onesphere/onesphere_mdm/models/maintenance.py | masami10/onesphere | 763a5f15b374e24b1c89aba530a22b000aff1aa5 | [
"MIT"
] | 2 | 2022-01-04T07:04:05.000Z | 2022-01-06T05:08:57.000Z | server/onesphere/onesphere_mdm/models/maintenance.py | masami10/onesphere | 763a5f15b374e24b1c89aba530a22b000aff1aa5 | [
"MIT"
] | 2 | 2021-12-25T07:18:22.000Z | 2022-02-22T01:35:44.000Z | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
| 36.963636 | 111 | 0.624201 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class MaintenanceEquipment(models.Model):
    """Extend maintenance.equipment with MTBF/MTTR stats and a work-center link."""
    _inherit = 'maintenance.equipment'
    _check_company_auto = True
    # Mirrors the category's technical name (related field, stored for search).
    technical_name = fields.Char('Technical name', related='category_id.technical_name', store=True)
    expected_mtbf = fields.Integer(string='Expected MTBF', help='Expected Mean Time Between Failure')
    mtbf = fields.Integer(string='MTBF',
                          help='Mean Time Between Failure, computed based on done corrective maintenances.')
    mttr = fields.Integer(string='MTTR', help='Mean Time To Repair')
    # Linked MRP work center; check_company keeps it within the record's company.
    workcenter_id = fields.Many2one(
        'mrp.workcenter', string='Work Center', check_company=True)
    def button_mrp_workcenter(self):
        """Open the linked work center in a form view (window action)."""
        self.ensure_one()
        return {
            'name': _('work centers'),
            'view_mode': 'form',
            'res_model': 'mrp.workcenter',
            'view_id': self.env.ref('mrp.mrp_workcenter_view').id,
            'type': 'ir.actions.act_window',
            'res_id': self.workcenter_id.id,
            'context': {
                'default_company_id': self.company_id.id
            }
        }
class MaintenanceEquipmentCategory(models.Model):
    """Add a unique, auto-filled technical name to maintenance.equipment.category."""
    _inherit = 'maintenance.equipment.category'
    @api.depends('name')
    def _compute_technical_name(self):
        """Default the technical name to the display name, never overwriting an existing one."""
        for category in self:
            if category.technical_name:
                continue
            category.technical_name = category.name
    # Stored computed field; referenced above by function object, which is why
    # the compute method is defined before this declaration.
    technical_name = fields.Char('Technical name', required=True, compute=_compute_technical_name, store=True)
    _sql_constraints = [
        ('technical_name_uniq', 'unique (technical_name)',
         'The technical name of the equipment category must be unique!')
    ]
    @api.model
    def create(self, vals):
        """Fall back to the category name when no technical name is supplied."""
        if not vals.get('technical_name', None):
            vals.update({'technical_name': vals.get('name')})
        return super(MaintenanceEquipmentCategory, self).create(vals)
| 794 | 1,119 | 48 |