# =============================================================================
# File: ansible/lib/ansible/modules/core/network/cumulus/cl_interface_policy.py
# Repo: kiv-box/redis (Apache-2.0)
# =============================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cl_interface_policy
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure interface enforcement policy on Cumulus Linux
description:
    - This module affects the configuration files located in the interfaces
      folder defined by ifupdown2. Interface ports and port ranges listed in
      the "allowed" parameter define which interfaces will be available on
      the switch. If the user runs this module and an interface configured
      on the switch is not found in the "allowed" list, that interface will
      be unconfigured. By default the interfaces folder is
      `/etc/network/interfaces.d/`. For more details, see Configuring
      Interfaces at U(http://docs.cumulusnetworks.com).
notes:
- lo must be included in the allowed list.
    - eth0 must be included in the allowed list if out-of-band management is used.
options:
allowed:
description:
        - List of interface names and port ranges allowed on the switch.
required: true
location:
description:
- Directory to store interface files.
default: '/etc/network/interfaces.d/'
'''
EXAMPLES = '''
Example playbook entries using the cl_interface_policy module.
- name: shows types of interface ranges supported
cl_interface_policy:
allowed: "lo eth0 swp1-9, swp11, swp12-13s0, swp12-30s1, swp12-30s2, bond0-12"
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# get list of interface files that are currently "configured".
# This doesn't mean they are actually applied to the system, but they most likely are.
def read_current_int_dir(module):
module.custom_currentportlist = os.listdir(module.params.get('location'))
# take the allowed list and convert it into a list
# of ports.
def convert_allowed_list_to_port_range(module):
allowedlist = module.params.get('allowed')
for portrange in allowedlist:
module.custom_allowedportlist += breakout_portrange(portrange)
def breakout_portrange(prange):
_m0 = re.match(r'(\w+[a-z.])(\d+)?-?(\d+)?(\w+)?', prange.strip())
# no range defined
if _m0.group(3) is None:
return [_m0.group(0)]
else:
portarray = []
intrange = range(int(_m0.group(2)), int(_m0.group(3)) + 1)
for _int in intrange:
portarray.append(''.join([_m0.group(1),
str(_int),
str(_m0.group(4) or '')
]
)
)
return portarray
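# Illustrative expansions (a sketch; calls assume running this helper
# standalone, outside of Ansible):
#   breakout_portrange('swp1-9')      # -> ['swp1', 'swp2', ..., 'swp9']
#   breakout_portrange('swp12-13s0')  # -> ['swp12s0', 'swp13s0']
#   breakout_portrange('lo')          # -> ['lo']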
# deletes the interface files
def unconfigure_interfaces(module):
currentportset = set(module.custom_currentportlist)
allowedportset = set(module.custom_allowedportlist)
remove_list = currentportset.difference(allowedportset)
fileprefix = module.params.get('location')
module.msg = "remove config for interfaces %s" % (', '.join(remove_list))
for _file in remove_list:
os.unlink(fileprefix + _file)
# check to see if policy should be enforced
# returns true if policy needs to be enforced
# that is delete interface files
def int_policy_enforce(module):
currentportset = set(module.custom_currentportlist)
allowedportset = set(module.custom_allowedportlist)
return not currentportset.issubset(allowedportset)
def main():
module = AnsibleModule(
argument_spec=dict(
allowed=dict(type='list', required=True),
location=dict(type='str', default='/etc/network/interfaces.d/')
),
)
module.custom_currentportlist = []
module.custom_allowedportlist = []
module.changed = False
module.msg = 'configured port list is part of allowed port list'
read_current_int_dir(module)
convert_allowed_list_to_port_range(module)
if int_policy_enforce(module):
module.changed = True
unconfigure_interfaces(module)
module.exit_json(changed=module.changed, msg=module.msg)
# import module snippets
from ansible.module_utils.basic import *
import os
import re
if __name__ == '__main__':
main()

# =============================================================================
# File: tmp/mturk_batch_batches.py
# Repo: kcarnold/sentiment-slant-gi18 (MIT)
# =============================================================================
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 13:43:07 2017
@author: kcarnold
"""
# Batch MTurk batches
# Created with the help of the following on the MTurk Manage screen:
# Array.from(document.querySelectorAll('a[id*="batch_status"]')).forEach(x => {let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', x.getAttribute('href')+'/download'); document.body.appendChild(f);})
# or just
# batches.forEach(batch => { let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', `https://requester.mturk.com/batches/${batch}/download`); document.body.appendChild(f);})
#%%
import pandas as pd
import glob
#%%
csvs = sorted(glob.glob('*.csv'))
dfs = [pd.read_csv(csv) for csv in csvs]
#%%
full_concat = pd.concat(dfs, axis=0).drop_duplicates(subset='AssignmentId', keep='first')
concats = pd.concat(dfs, axis=0, join='inner').drop_duplicates(subset='AssignmentId', keep='first')
other_axis = pd.Index(concats.columns.tolist() + ['Answer.code'])
concats = pd.concat(dfs, axis=0, join_axes=[other_axis]).drop_duplicates('AssignmentId', keep='first')
concats.to_csv('all_assignments.csv', index=False)
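# Note: join_axes was deprecated in pandas 0.25 and removed in 1.0. A rough
# modern equivalent of the join_axes concat above (same dfs and other_axis):
#   concats = (pd.concat(dfs, axis=0)
#              .reindex(columns=other_axis)
#              .drop_duplicates('AssignmentId', keep='first'))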
# You'll also find this helpful:
# copy(Array.from(document.querySelectorAll('#batches_reviewable a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n'))
# copy(Array.from(document.querySelectorAll('#batches_reviewed a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n'))

# =============================================================================
# File: bindings/python/legion.py
# Repo: hzhou/Legion (Apache-2.0)
# =============================================================================
#!/usr/bin/env python
# Copyright 2019 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import cffi
try:
import cPickle as pickle
except ImportError:
import pickle
import collections
import itertools
import math
import numpy
import os
import re
import subprocess
import sys
import threading
# Python 3.x compatibility:
try:
long # Python 2
except NameError:
long = int # Python 3
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
try:
zip_longest = itertools.izip_longest # Python 2
except AttributeError:
zip_longest = itertools.zip_longest # Python 3
_pickle_version = pickle.HIGHEST_PROTOCOL # Use latest Pickle protocol
def find_legion_header():
def try_prefix(prefix_dir):
legion_h_path = os.path.join(prefix_dir, 'legion.h')
if os.path.exists(legion_h_path):
return prefix_dir, legion_h_path
# For in-source builds, find the header relative to the bindings
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
runtime_dir = os.path.join(root_dir, 'runtime')
result = try_prefix(runtime_dir)
if result:
return result
# If this was installed to a non-standard prefix, we might be able
# to guess from the directory structures
if os.path.basename(root_dir) == 'lib':
include_dir = os.path.join(os.path.dirname(root_dir), 'include')
result = try_prefix(include_dir)
if result:
return result
# Otherwise we have to hope that Legion is installed in a standard location
result = try_prefix('/usr/include')
if result:
return result
result = try_prefix('/usr/local/include')
if result:
return result
raise Exception('Unable to locate legion.h header file')
prefix_dir, legion_h_path = find_legion_header()
header = subprocess.check_output(['gcc', '-I', prefix_dir, '-E', '-P', legion_h_path]).decode('utf-8')
# Hack: Fix for Ubuntu 16.04 versions of standard library headers:
header = re.sub(r'typedef struct {.+?} max_align_t;', '', header, flags=re.DOTALL)
ffi = cffi.FFI()
ffi.cdef(header)
c = ffi.dlopen(None)
max_legion_python_tasks = 1000000
next_legion_task_id = c.legion_runtime_generate_library_task_ids(
c.legion_runtime_get_runtime(),
os.path.basename(__file__).encode('utf-8'),
max_legion_python_tasks)
max_legion_task_id = next_legion_task_id + max_legion_python_tasks
# Returns true if this module is running inside of a Legion
# executable. If false, then other Legion functionality should not be
# expected to work.
def inside_legion_executable():
try:
c.legion_get_current_time_in_micros()
except AttributeError:
return False
else:
return True
def input_args(filter_runtime_options=False):
raw_args = c.legion_runtime_get_input_args()
args = []
for i in range(raw_args.argc):
args.append(ffi.string(raw_args.argv[i]).decode('utf-8'))
if filter_runtime_options:
i = 1 # Skip program name
prefixes = ['-lg:', '-hl:', '-realm:', '-ll:', '-cuda:', '-numa:',
'-dm:', '-bishop:']
while i < len(args):
match = False
for prefix in prefixes:
if args[i].startswith(prefix):
match = True
break
if args[i] == '-level':
match = True
if args[i] == '-logfile':
match = True
if match:
args.pop(i)
args.pop(i) # Assume that every option has an argument
continue
i += 1
return args
# The Legion context is stored in thread-local storage. This assumes
# that the Python processor maintains the invariant that every task
# corresponds to one and only one thread.
_my = threading.local()
class Context(object):
__slots__ = ['context_root', 'context', 'runtime_root', 'runtime',
'task_root', 'task', 'regions', 'current_launch']
def __init__(self, context_root, runtime_root, task_root, regions):
self.context_root = context_root
self.context = self.context_root[0]
self.runtime_root = runtime_root
self.runtime = self.runtime_root[0]
self.task_root = task_root
self.task = self.task_root[0]
self.regions = regions
self.current_launch = None
def begin_launch(self, launch):
assert self.current_launch == None
self.current_launch = launch
def end_launch(self, launch):
assert self.current_launch == launch
self.current_launch = None
class DomainPoint(object):
__slots__ = ['impl']
def __init__(self, value):
assert(isinstance(value, _IndexValue))
self.impl = ffi.new('legion_domain_point_t *')
self.impl[0].dim = 1
self.impl[0].point_data[0] = int(value)
def raw_value(self):
return self.impl[0]
class Domain(object):
__slots__ = ['impl']
def __init__(self, extent, start=None):
if start is not None:
assert len(start) == len(extent)
else:
start = [0 for _ in extent]
assert 1 <= len(extent) <= 3
rect = ffi.new('legion_rect_{}d_t *'.format(len(extent)))
for i in xrange(len(extent)):
rect[0].lo.x[i] = start[i]
rect[0].hi.x[i] = start[i] + extent[i] - 1
self.impl = getattr(c, 'legion_domain_from_rect_{}d'.format(len(extent)))(rect[0])
def raw_value(self):
return self.impl
class Future(object):
__slots__ = ['handle', 'value_type', 'argument_number']
def __init__(self, value, value_type=None, argument_number=None):
if value is None:
self.handle = None
elif isinstance(value, Future):
value.resolve_handle()
self.handle = c.legion_future_copy(value.handle)
if value_type is None:
value_type = value.value_type
elif value_type is not None:
if value_type.size > 0:
value_ptr = ffi.new(ffi.getctype(value_type.cffi_type, '*'), value)
else:
value_ptr = ffi.NULL
value_size = value_type.size
self.handle = c.legion_future_from_untyped_pointer(_my.ctx.runtime, value_ptr, value_size)
else:
value_str = pickle.dumps(value, protocol=_pickle_version)
value_size = len(value_str)
value_ptr = ffi.new('char[]', value_size)
ffi.buffer(value_ptr, value_size)[:] = value_str
self.handle = c.legion_future_from_untyped_pointer(_my.ctx.runtime, value_ptr, value_size)
self.value_type = value_type
self.argument_number = argument_number
@staticmethod
def from_cdata(value, *args, **kwargs):
result = Future(None, *args, **kwargs)
result.handle = c.legion_future_copy(value)
return result
@staticmethod
def from_buffer(value, *args, **kwargs):
result = Future(None, *args, **kwargs)
result.handle = c.legion_future_from_untyped_pointer(_my.ctx.runtime, ffi.from_buffer(value), len(value))
return result
def __del__(self):
if self.handle is not None:
c.legion_future_destroy(self.handle)
def __reduce__(self):
if self.argument_number is None:
raise Exception('Cannot pickle a Future except when used as a task argument')
return (Future, (None, self.value_type, self.argument_number))
def resolve_handle(self):
if self.handle is None and self.argument_number is not None:
self.handle = c.legion_future_copy(
c.legion_task_get_future(_my.ctx.task, self.argument_number))
def get(self):
self.resolve_handle()
if self.handle is None:
return
if self.value_type is None:
value_ptr = c.legion_future_get_untyped_pointer(self.handle)
value_size = c.legion_future_get_untyped_size(self.handle)
assert value_size > 0
value_str = ffi.unpack(ffi.cast('char *', value_ptr), value_size)
value = pickle.loads(value_str)
return value
elif self.value_type.size == 0:
c.legion_future_get_void_result(self.handle)
else:
expected_size = ffi.sizeof(self.value_type.cffi_type)
value_ptr = c.legion_future_get_untyped_pointer(self.handle)
value_size = c.legion_future_get_untyped_size(self.handle)
assert value_size == expected_size
value = ffi.cast(ffi.getctype(self.value_type.cffi_type, '*'), value_ptr)[0]
return value
def get_buffer(self):
self.resolve_handle()
if self.handle is None:
return
value_ptr = c.legion_future_get_untyped_pointer(self.handle)
value_size = c.legion_future_get_untyped_size(self.handle)
return ffi.buffer(value_ptr, value_size)
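# Hypothetical usage sketch (assumes code running inside a Legion task so
# that _my.ctx is set; the literal 42 and int64 are illustrative):
#   f = Future(42, value_type=int64)
#   assert f.get() == 42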
class FutureMap(object):
__slots__ = ['handle', 'value_type']
def __init__(self, handle, value_type=None):
self.handle = c.legion_future_map_copy(handle)
self.value_type = value_type
def __del__(self):
c.legion_future_map_destroy(self.handle)
def __getitem__(self, point):
domain_point = DomainPoint(_IndexValue(point))
return Future.from_cdata(
c.legion_future_map_get_future(self.handle, domain_point.raw_value()),
value_type=self.value_type)
class Type(object):
__slots__ = ['numpy_type', 'cffi_type', 'size']
def __init__(self, numpy_type, cffi_type):
assert (numpy_type is None) == (cffi_type is None)
self.numpy_type = numpy_type
self.cffi_type = cffi_type
self.size = numpy.dtype(numpy_type).itemsize if numpy_type is not None else 0
def __reduce__(self):
return (Type, (self.numpy_type, self.cffi_type))
# Pre-defined Types
void = Type(None, None)
float16 = Type(numpy.float16, 'short float')
float32 = Type(numpy.float32, 'float')
float64 = Type(numpy.float64, 'double')
int8 = Type(numpy.int8, 'int8_t')
int16 = Type(numpy.int16, 'int16_t')
int32 = Type(numpy.int32, 'int32_t')
int64 = Type(numpy.int64, 'int64_t')
uint8 = Type(numpy.uint8, 'uint8_t')
uint16 = Type(numpy.uint16, 'uint16_t')
uint32 = Type(numpy.uint32, 'uint32_t')
uint64 = Type(numpy.uint64, 'uint64_t')
class Privilege(object):
__slots__ = ['read', 'write', 'discard']
def __init__(self, read=False, write=False, discard=False):
self.read = read
self.write = write
self.discard = discard
def _fields(self):
return (self.read, self.write, self.discard)
def __eq__(self, other):
return isinstance(other, Privilege) and self._fields() == other._fields()
def __cmp__(self, other):
assert isinstance(other, Privilege)
return self._fields().__cmp__(other._fields())
def __hash__(self):
return hash(self._fields())
def __call__(self, fields):
return PrivilegeFields(self, fields)
def _legion_privilege(self):
bits = 0
if self.discard:
assert self.write
bits |= 2 # WRITE_DISCARD
else:
if self.write: bits = 7 # READ_WRITE
elif self.read: bits = 1 # READ_ONLY
return bits
class PrivilegeFields(Privilege):
__slots__ = ['read', 'write', 'discard', 'fields']
def __init__(self, privilege, fields):
Privilege.__init__(self, privilege.read, privilege.write, privilege.discard)
self.fields = fields
# Pre-defined Privileges
N = Privilege()
R = Privilege(read=True)
RO = Privilege(read=True)
RW = Privilege(read=True, write=True)
WD = Privilege(write=True, discard=True)
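# Field-restricted privileges are built by calling a Privilege instance
# (a sketch; the field name 'x' is illustrative):
#   RW(['x'])               # PrivilegeFields: read/write on field 'x' only
#   R._legion_privilege()   # -> 1 (READ_ONLY)
#   RW._legion_privilege()  # -> 7 (READ_WRITE)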
# Hack: Can't pickle static methods.
def _Ispace_unpickle(ispace_tid, ispace_id, ispace_type_tag):
handle = ffi.new('legion_index_space_t *')
handle[0].tid = ispace_tid
handle[0].id = ispace_id
handle[0].type_tag = ispace_type_tag
return Ispace(handle[0])
class Ispace(object):
__slots__ = ['handle']
def __init__(self, handle):
# Important: Copy handle. Do NOT assume ownership.
self.handle = ffi.new('legion_index_space_t *', handle)
def __reduce__(self):
return (_Ispace_unpickle,
(self.handle[0].tid,
self.handle[0].id,
self.handle[0].type_tag))
@staticmethod
def create(extent, start=None):
domain = Domain(extent, start=start).raw_value()
handle = c.legion_index_space_create_domain(_my.ctx.runtime, _my.ctx.context, domain)
return Ispace(handle)
# Hack: Can't pickle static methods.
def _Fspace_unpickle(fspace_id, field_ids, field_types):
handle = ffi.new('legion_field_space_t *')
handle[0].id = fspace_id
return Fspace(handle[0], field_ids, field_types)
class Fspace(object):
__slots__ = ['handle', 'field_ids', 'field_types']
def __init__(self, handle, field_ids, field_types):
# Important: Copy handle. Do NOT assume ownership.
self.handle = ffi.new('legion_field_space_t *', handle)
self.field_ids = field_ids
self.field_types = field_types
def __reduce__(self):
return (_Fspace_unpickle,
(self.handle[0].id,
self.field_ids,
self.field_types))
@staticmethod
def create(fields):
handle = c.legion_field_space_create(_my.ctx.runtime, _my.ctx.context)
alloc = c.legion_field_allocator_create(
_my.ctx.runtime, _my.ctx.context, handle)
field_ids = collections.OrderedDict()
field_types = collections.OrderedDict()
for field_name, field_entry in fields.items():
try:
field_type, field_id = field_entry
except TypeError:
field_type = field_entry
field_id = ffi.cast('legion_field_id_t', -1) # AUTO_GENERATE_ID
field_id = c.legion_field_allocator_allocate_field(
alloc, field_type.size, field_id)
c.legion_field_id_attach_name(
_my.ctx.runtime, handle, field_id, field_name.encode('utf-8'), False)
field_ids[field_name] = field_id
field_types[field_name] = field_type
c.legion_field_allocator_destroy(alloc)
return Fspace(handle, field_ids, field_types)
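# Hypothetical usage sketch (inside a Legion task; field names illustrative):
#   fs = Fspace.create({'x': float64, 'y': int32})
#   fs.field_ids  # OrderedDict mapping 'x' and 'y' to allocated field IDs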
# Hack: Can't pickle static methods.
def _Region_unpickle(tree_id, ispace, fspace):
handle = ffi.new('legion_logical_region_t *')
handle[0].tree_id = tree_id
handle[0].index_space.tid = ispace.handle[0].tid
handle[0].index_space.id = ispace.handle[0].id
handle[0].field_space.id = fspace.handle[0].id
return Region(handle[0], ispace, fspace)
class Region(object):
__slots__ = ['handle', 'ispace', 'fspace',
'instances', 'privileges', 'instance_wrappers']
# Make this speak the Type interface
numpy_type = None
cffi_type = 'legion_logical_region_t'
size = ffi.sizeof(cffi_type)
def __init__(self, handle, ispace, fspace):
# Important: Copy handle. Do NOT assume ownership.
self.handle = ffi.new('legion_logical_region_t *', handle)
self.ispace = ispace
self.fspace = fspace
self.instances = {}
self.privileges = {}
self.instance_wrappers = {}
def __reduce__(self):
return (_Region_unpickle,
(self.handle[0].tree_id,
self.ispace,
self.fspace))
@staticmethod
def create(ispace, fspace):
if not isinstance(ispace, Ispace):
ispace = Ispace.create(ispace)
if not isinstance(fspace, Fspace):
fspace = Fspace.create(fspace)
handle = c.legion_logical_region_create(
_my.ctx.runtime, _my.ctx.context, ispace.handle[0], fspace.handle[0], False)
result = Region(handle, ispace, fspace)
for field_name in fspace.field_ids.keys():
result.set_privilege(field_name, RW)
return result
def destroy(self):
# This is not something you want to have happen in a
# destructor, since regions may outlive the lifetime of the handle.
c.legion_logical_region_destroy(
_my.ctx.runtime, _my.ctx.context, self.handle[0])
# Clear out references. Technically unnecessary but avoids abuse.
del self.instance_wrappers
del self.instances
del self.handle
del self.ispace
del self.fspace
def set_privilege(self, field_name, privilege):
assert field_name not in self.privileges
self.privileges[field_name] = privilege
def set_instance(self, field_name, instance, privilege=None):
assert field_name not in self.instances
self.instances[field_name] = instance
if privilege is not None:
assert field_name not in self.privileges
self.privileges[field_name] = privilege
def map_inline(self):
fields_by_privilege = collections.defaultdict(set)
for field_name, privilege in self.privileges.items():
fields_by_privilege[privilege].add(field_name)
for privilege, field_names in fields_by_privilege.items():
launcher = c.legion_inline_launcher_create_logical_region(
self.handle[0],
privilege._legion_privilege(), 0, # EXCLUSIVE
self.handle[0],
0, False, 0, 0)
for field_name in field_names:
c.legion_inline_launcher_add_field(
launcher, self.fspace.field_ids[field_name], True)
instance = c.legion_inline_launcher_execute(
_my.ctx.runtime, _my.ctx.context, launcher)
for field_name in field_names:
self.set_instance(field_name, instance)
def __getattr__(self, field_name):
if field_name in self.fspace.field_ids:
if field_name not in self.instances:
if self.privileges[field_name] is None:
raise Exception('Invalid attempt to access field "%s" without privileges' % field_name)
self.map_inline()
if field_name not in self.instance_wrappers:
self.instance_wrappers[field_name] = RegionField(
self, field_name)
return self.instance_wrappers[field_name]
else:
raise AttributeError()
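# Hypothetical usage sketch (inside a Legion task; names illustrative):
#   r = Region.create([4], {'x': float64})
#   r.x.fill(0)  # r.x maps the field inline as a numpy-compatible array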
class RegionField(numpy.ndarray):
# NumPy requires us to implement __new__ for subclasses of ndarray:
# https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
def __new__(cls, region, field_name):
accessor = RegionField._get_accessor(region, field_name)
initializer = RegionField._get_array_initializer(region, field_name, accessor)
obj = numpy.asarray(initializer).view(cls)
obj.accessor = accessor
return obj
@staticmethod
def _get_accessor(region, field_name):
        # Note: the accessor needs to be kept alive, so make sure to
        # save the result of this function in an instance variable.
instance = region.instances[field_name]
domain = c.legion_index_space_get_domain(
_my.ctx.runtime, region.ispace.handle[0])
dim = domain.dim
get_accessor = getattr(c, 'legion_physical_region_get_field_accessor_array_{}d'.format(dim))
return get_accessor(instance, region.fspace.field_ids[field_name])
@staticmethod
def _get_base_and_stride(region, field_name, accessor):
domain = c.legion_index_space_get_domain(
_my.ctx.runtime, region.ispace.handle[0])
dim = domain.dim
rect = getattr(c, 'legion_domain_get_rect_{}d'.format(dim))(domain)
subrect = ffi.new('legion_rect_{}d_t *'.format(dim))
offsets = ffi.new('legion_byte_offset_t[]', dim)
base_ptr = getattr(c, 'legion_accessor_array_{}d_raw_rect_ptr'.format(dim))(
accessor, rect, subrect, offsets)
assert base_ptr
for i in xrange(dim):
assert subrect[0].lo.x[i] == rect.lo.x[i]
assert subrect[0].hi.x[i] == rect.hi.x[i]
assert offsets[0].offset == region.fspace.field_types[field_name].size
shape = tuple(rect.hi.x[i] - rect.lo.x[i] + 1 for i in xrange(dim))
strides = tuple(offsets[i].offset for i in xrange(dim))
return base_ptr, shape, strides
@staticmethod
def _get_array_initializer(region, field_name, accessor):
base_ptr, shape, strides = RegionField._get_base_and_stride(
region, field_name, accessor)
field_type = region.fspace.field_types[field_name]
# Numpy doesn't know about CFFI pointers, so we have to cast
# this to a Python long before we can hand it off to Numpy.
base_ptr = long(ffi.cast("size_t", base_ptr))
return _RegionNdarray(shape, field_type, base_ptr, strides, False)
# This is a dummy object that is only used as an initializer for the
# RegionField object above. It is thrown away as soon as the
# RegionField is constructed.
class _RegionNdarray(object):
__slots__ = ['__array_interface__']
def __init__(self, shape, field_type, base_ptr, strides, read_only):
# See: https://docs.scipy.org/doc/numpy/reference/arrays.interface.html
self.__array_interface__ = {
'version': 3,
'shape': shape,
'typestr': numpy.dtype(field_type.numpy_type).str,
'data': (base_ptr, read_only),
'strides': strides,
}
def define_regent_argument_struct(task_id, argument_types, privileges, return_type, arguments):
if argument_types is None:
raise Exception('Arguments must be typed in extern Regent tasks')
struct_name = 'task_args_%s' % task_id
n_fields = int(math.ceil(len(argument_types)/64.))
fields = ['uint64_t %s[%s];' % ('__map', n_fields)]
for i, arg_type in enumerate(argument_types):
arg_name = '__arg_%s' % i
fields.append('%s %s;' % (arg_type.cffi_type, arg_name))
for i, arg in enumerate(arguments):
if isinstance(arg, Region):
for j, field_type in enumerate(arg.fspace.field_types.values()):
arg_name = '__arg_%s_field_%s' % (i, j)
fields.append('legion_field_id_t %s;' % arg_name)
struct = 'typedef struct %s { %s } %s;' % (struct_name, ' '.join(fields), struct_name)
ffi.cdef(struct)
return struct_name
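# For example (a sketch): task_id=101 with argument_types=[int32] and no
# region arguments would cdef:
#   typedef struct task_args_101 { uint64_t __map[1]; int32_t __arg_0; } task_args_101;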
class ExternTask(object):
__slots__ = ['argument_types', 'privileges', 'return_type',
'calling_convention', 'task_id', '_argument_struct']
def __init__(self, task_id, argument_types=None, privileges=None,
return_type=void, calling_convention=None):
self.argument_types = argument_types
if privileges is not None:
privileges = [(x if x is not None else N) for x in privileges]
self.privileges = privileges
self.return_type = return_type
self.calling_convention = calling_convention
assert isinstance(task_id, int)
self.task_id = task_id
self._argument_struct = None
def argument_struct(self, args):
if self.calling_convention == 'regent' and self._argument_struct is None:
self._argument_struct = define_regent_argument_struct(
self.task_id, self.argument_types, self.privileges, self.return_type, args)
return self._argument_struct
def __call__(self, *args):
return self.spawn_task(*args)
def spawn_task(self, *args):
if _my.ctx.current_launch:
return _my.ctx.current_launch.spawn_task(self, *args)
return TaskLaunch().spawn_task(self, *args)
def extern_task(**kwargs):
return ExternTask(**kwargs)
def get_qualname(fn):
# Python >= 3.3 only
try:
return fn.__qualname__.split('.')
except AttributeError:
pass
# Python < 3.3
try:
import qualname
return qualname.qualname(fn).split('.')
except ImportError:
pass
# Hack: Issue error if we're wrapping a class method and failed to
# get the qualname
import inspect
context = [x[0].f_code.co_name for x in inspect.stack()
if '__module__' in x[0].f_code.co_names and
inspect.getmodule(x[0].f_code).__name__ != __name__]
if len(context) > 0:
raise Exception('To use a task defined in a class, please upgrade to Python >= 3.3 or install qualname (e.g. pip install qualname)')
return [fn.__name__]
class Task (object):
__slots__ = ['body', 'privileges', 'return_type',
'leaf', 'inner', 'idempotent', 'replicable',
'calling_convention', 'argument_struct',
'task_id', 'registered']
def __init__(self, body, privileges=None, return_type=None,
leaf=False, inner=False, idempotent=False, replicable=False,
register=True, task_id=None, top_level=False):
self.body = body
if privileges is not None:
privileges = [(x if x is not None else N) for x in privileges]
self.privileges = privileges
self.return_type = return_type
self.leaf = bool(leaf)
self.inner = bool(inner)
self.idempotent = bool(idempotent)
self.replicable = bool(replicable)
self.calling_convention = 'python'
self.argument_struct = None
self.task_id = None
if register:
self.register(task_id, top_level)
def __call__(self, *args):
# Hack: This entrypoint needs to be able to handle both being
# called in user code (to launch a task) and as the task
        # wrapper when the task itself executes. Unfortunately there isn't a
# good way to disentangle these. Detect if we're in the task
# wrapper case by checking the number and types of arguments.
if len(args) == 3 and \
isinstance(args[0], bytearray) and \
isinstance(args[1], bytearray) and \
isinstance(args[2], long):
return self.execute_task(*args)
else:
return self.spawn_task(*args)
def spawn_task(self, *args):
if _my.ctx.current_launch:
return _my.ctx.current_launch.spawn_task(self, *args)
return TaskLaunch().spawn_task(self, *args)
def execute_task(self, raw_args, user_data, proc):
raw_arg_ptr = ffi.new('char[]', bytes(raw_args))
raw_arg_size = len(raw_args)
# Execute preamble to obtain Legion API context.
task = ffi.new('legion_task_t *')
raw_regions = ffi.new('legion_physical_region_t **')
num_regions = ffi.new('unsigned *')
context = ffi.new('legion_context_t *')
runtime = ffi.new('legion_runtime_t *')
c.legion_task_preamble(
raw_arg_ptr, raw_arg_size, proc,
task, raw_regions, num_regions, context, runtime)
# Decode arguments from Pickle format.
if c.legion_task_get_is_index_space(task[0]):
arg_ptr = ffi.cast('char *', c.legion_task_get_local_args(task[0]))
arg_size = c.legion_task_get_local_arglen(task[0])
else:
arg_ptr = ffi.cast('char *', c.legion_task_get_args(task[0]))
arg_size = c.legion_task_get_arglen(task[0])
if arg_size > 0 and c.legion_task_get_depth(task[0]) > 0:
args = pickle.loads(ffi.unpack(arg_ptr, arg_size))
else:
args = ()
# Unpack regions.
regions = []
for i in xrange(num_regions[0]):
regions.append(raw_regions[0][i])
# Unpack physical regions.
if self.privileges is not None:
req = 0
for i, arg in zip(range(len(args)), args):
if isinstance(arg, Region):
assert req < num_regions[0] and req < len(self.privileges)
instance = raw_regions[0][req]
req += 1
priv = self.privileges[i]
if hasattr(priv, 'fields'):
assert set(priv.fields) <= set(arg.fspace.field_ids.keys())
for name, fid in arg.fspace.field_ids.items():
if not hasattr(priv, 'fields') or name in priv.fields:
arg.set_instance(name, instance, priv)
assert req == num_regions[0]
# Build context.
ctx = Context(context, runtime, task, regions)
# Ensure that we're not getting tangled up in another
# thread. There should be exactly one thread per task.
try:
_my.ctx
except AttributeError:
pass
else:
raise Exception('thread-local context already set')
# Store context in thread-local storage.
_my.ctx = ctx
# Execute task body.
result = self.body(*args)
# Encode result.
if not self.return_type:
result_str = pickle.dumps(result, protocol=_pickle_version)
result_size = len(result_str)
result_ptr = ffi.new('char[]', result_size)
ffi.buffer(result_ptr, result_size)[:] = result_str
else:
if self.return_type.size > 0:
result_ptr = ffi.new(ffi.getctype(self.return_type.cffi_type, '*'), result)
else:
result_ptr = ffi.NULL
result_size = self.return_type.size
# Execute postamble.
c.legion_task_postamble(runtime[0], context[0], result_ptr, result_size)
# Clear thread-local storage.
del _my.ctx
def register(self, task_id, top_level_task):
assert(self.task_id is None)
if not task_id:
if not top_level_task:
global next_legion_task_id
task_id = next_legion_task_id
next_legion_task_id += 1
# If we ever hit this then we need to allocate more task IDs
assert task_id < max_legion_task_id
else:
task_id = 1 # Predefined value for the top-level task
execution_constraints = c.legion_execution_constraint_set_create()
c.legion_execution_constraint_set_add_processor_constraint(
execution_constraints, c.PY_PROC)
layout_constraints = c.legion_task_layout_constraint_set_create()
# FIXME: Add layout constraints
options = ffi.new('legion_task_config_options_t *')
options[0].leaf = self.leaf
options[0].inner = self.inner
options[0].idempotent = self.idempotent
options[0].replicable = self.replicable
qualname = get_qualname(self.body)
task_name = ('%s.%s' % (self.body.__module__, '.'.join(qualname)))
c_qualname_comps = [ffi.new('char []', comp.encode('utf-8')) for comp in qualname]
c_qualname = ffi.new('char *[]', c_qualname_comps)
c.legion_runtime_register_task_variant_python_source_qualname(
c.legion_runtime_get_runtime(),
task_id,
task_name.encode('utf-8'),
False, # Global
execution_constraints,
layout_constraints,
options[0],
self.body.__module__.encode('utf-8'),
c_qualname,
len(qualname),
ffi.NULL,
0)
c.legion_execution_constraint_set_destroy(execution_constraints)
c.legion_task_layout_constraint_set_destroy(layout_constraints)
self.task_id = task_id
return self
def task(body=None, **kwargs):
if body is None:
return lambda body: task(body, **kwargs)
return Task(body, **kwargs)
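# Hypothetical usage sketch (names illustrative):
#   @task
#   def hello(name):
#       print('hello %s' % name)
# Calling hello('world') from inside a running Legion task spawns a subtask
# and returns a Future.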
class _TaskLauncher(object):
__slots__ = ['task']
def __init__(self, task):
self.task = task
def preprocess_args(self, args):
return [
arg._legion_preprocess_task_argument()
if hasattr(arg, '_legion_preprocess_task_argument') else arg
for arg in args]
def gather_futures(self, args):
normal = []
futures = []
for arg in args:
if isinstance(arg, Future):
arg = Future(arg, argument_number=len(futures))
futures.append(arg)
normal.append(arg)
return normal, futures
def encode_args(self, args):
task_args = ffi.new('legion_task_argument_t *')
task_args_buffer = None
if self.task.calling_convention == 'python':
arg_str = pickle.dumps(args, protocol=_pickle_version)
task_args_buffer = ffi.new('char[]', arg_str)
task_args[0].args = task_args_buffer
task_args[0].arglen = len(arg_str)
elif self.task.calling_convention == 'regent':
arg_struct = self.task.argument_struct(args)
task_args_buffer = ffi.new('%s*' % arg_struct)
# FIXME: Correct for > 64 arguments.
getattr(task_args_buffer, '__map')[0] = 0 # Currently we never pass futures.
for i, arg in enumerate(args):
arg_name = '__arg_%s' % i
arg_value = arg
if hasattr(arg, 'handle'):
arg_value = arg.handle[0]
setattr(task_args_buffer, arg_name, arg_value)
for i, arg in enumerate(args):
if isinstance(arg, Region):
for j, field_id in enumerate(arg.fspace.field_ids.values()):
arg_name = '__arg_%s_field_%s' % (i, j)
setattr(task_args_buffer, arg_name, field_id)
task_args[0].args = task_args_buffer
task_args[0].arglen = ffi.sizeof(arg_struct)
else:
# FIXME: External tasks need a dedicated calling
# convention to permit the passing of task arguments.
task_args[0].args = ffi.NULL
task_args[0].arglen = 0
# WARNING: Need to return the interior buffer or else it will be GC'd
return task_args, task_args_buffer
def spawn_task(self, *args):
assert(isinstance(_my.ctx, Context))
args = self.preprocess_args(args)
args, futures = self.gather_futures(args)
task_args, _ = self.encode_args(args)
# Construct the task launcher.
launcher = c.legion_task_launcher_create(
self.task.task_id, task_args[0], c.legion_predicate_true(), 0, 0)
for i, arg in zip(range(len(args)), args):
if isinstance(arg, Region):
assert i < len(self.task.privileges)
priv = self.task.privileges[i]
req = c.legion_task_launcher_add_region_requirement_logical_region(
launcher, arg.handle[0],
priv._legion_privilege(),
0, # EXCLUSIVE
arg.handle[0], 0, False)
if hasattr(priv, 'fields'):
assert set(priv.fields) <= set(arg.fspace.field_ids.keys())
for name, fid in arg.fspace.field_ids.items():
if not hasattr(priv, 'fields') or name in priv.fields:
c.legion_task_launcher_add_field(
launcher, req, fid, True)
elif isinstance(arg, Future):
c.legion_task_launcher_add_future(launcher, arg.handle)
elif self.task.calling_convention is None:
# FIXME: Task arguments aren't being encoded AT ALL;
# at least throw an exception so that the user knows
raise Exception('External tasks do not support non-region arguments')
# Launch the task.
result = c.legion_task_launcher_execute(
_my.ctx.runtime, _my.ctx.context, launcher)
c.legion_task_launcher_destroy(launcher)
# Build future of result.
future = Future.from_cdata(result, value_type=self.task.return_type)
c.legion_future_destroy(result)
return future
class _IndexLauncher(_TaskLauncher):
__slots__ = ['task', 'domain', 'local_args', 'future_args', 'future_map']
def __init__(self, task, domain):
super(_IndexLauncher, self).__init__(task)
self.domain = domain
self.local_args = c.legion_argument_map_create()
self.future_args = []
self.future_map = None
def __del__(self):
c.legion_argument_map_destroy(self.local_args)
def spawn_task(self, *args):
raise Exception('IndexLaunch does not support spawn_task')
def attach_local_args(self, index, *args):
point = DomainPoint(index)
task_args, _ = self.encode_args(args)
c.legion_argument_map_set_point(
self.local_args, point.raw_value(), task_args[0], False)
def attach_future_args(self, *args):
self.future_args = args
def launch(self):
# All arguments are passed as local, so global is NULL.
global_args = ffi.new('legion_task_argument_t *')
global_args[0].args = ffi.NULL
global_args[0].arglen = 0
# Construct the task launcher.
launcher = c.legion_index_launcher_create(
self.task.task_id, self.domain.raw_value(),
global_args[0], self.local_args,
c.legion_predicate_true(), False, 0, 0)
for arg in self.future_args:
c.legion_index_launcher_add_future(launcher, arg.handle)
# Launch the task.
result = c.legion_index_launcher_execute(
_my.ctx.runtime, _my.ctx.context, launcher)
c.legion_index_launcher_destroy(launcher)
# Build future (map) of result.
self.future_map = FutureMap(result)
c.legion_future_map_destroy(result)
class TaskLaunch(object):
__slots__ = []
def spawn_task(self, task, *args):
launcher = _TaskLauncher(task=task)
return launcher.spawn_task(*args)
class _IndexValue(object):
__slots__ = ['value']
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
def __index__(self):
return self.value
def __str__(self):
return str(self.value)
def __repr__(self):
return repr(self.value)
def _legion_preprocess_task_argument(self):
return self.value
class _FuturePoint(object):
__slots__ = ['launcher', 'point', 'future']
def __init__(self, launcher, point):
self.launcher = launcher
self.point = point
self.future = None
def get(self):
if self.launcher.future_map is None:
raise Exception('Cannot retrieve a future from an index launch until the launch is complete')
self.future = self.launcher.future_map[self.point]
# Clear launcher and point
del self.launcher
del self.point
return self.future.get()
class IndexLaunch(object):
__slots__ = ['extent', 'domain', 'launcher', 'point',
'saved_task', 'saved_args']
def __init__(self, extent):
assert len(extent) == 1
self.extent = extent
self.domain = Domain(extent)
self.launcher = None
self.point = None
self.saved_task = None
self.saved_args = None
def __iter__(self):
_my.ctx.begin_launch(self)
self.point = _IndexValue(None)
for i in xrange(self.extent[0]):
self.point.value = i
yield self.point
_my.ctx.end_launch(self)
self.launch()
def ensure_launcher(self, task):
if self.launcher is None:
self.launcher = _IndexLauncher(task=task, domain=self.domain)
def check_compatibility(self, task, *args):
# The tasks in a launch must conform to the following constraints:
# * Only one task can be launched.
# * The arguments must be compatible:
# * At a given argument position, the value must always
# be a special value, or always not.
# * Special values include: regions and futures.
# * If a region, the value must be symbolic (i.e. able
# to be analyzed as a function of the index expression).
# * If a future, the values must be literally identical
# (i.e. each argument slot in the launch can only
# accept a single future value.)
if self.saved_task is None:
self.saved_task = task
if task != self.saved_task:
raise Exception('An IndexLaunch may contain only one task launch')
if self.saved_args is None:
self.saved_args = args
for arg, saved_arg in zip_longest(args, self.saved_args):
# TODO: Add support for region arguments
if isinstance(arg, Region) or isinstance(arg, RegionField):
raise Exception('TODO: Support region arguments to an IndexLaunch')
elif isinstance(arg, Future):
if arg != saved_arg:
raise Exception('Future argument to IndexLaunch does not match previous value at this position')
def spawn_task(self, task, *args):
self.ensure_launcher(task)
self.check_compatibility(task, *args)
args = self.launcher.preprocess_args(args)
args, futures = self.launcher.gather_futures(args)
self.launcher.attach_local_args(self.point, *args)
self.launcher.attach_future_args(*futures)
# TODO: attach region args
return _FuturePoint(self.launcher, int(self.point))
def launch(self):
self.launcher.launch()
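# Hypothetical usage sketch (inside a Legion task; hello is the illustrative
# task from the sketch above):
#   for i in IndexLaunch([4]):
#       hello(i)  # all four points are launched as one index space launch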
@task(leaf=True)
def _dummy_task():
return 1
def execution_fence(block=False):
c.legion_runtime_issue_execution_fence(_my.ctx.runtime, _my.ctx.context)
if block:
_dummy_task().get()
class Tunable(object):
# FIXME: Deduplicate this with DefaultMapper::DefaultTunables
NODE_COUNT = 0
LOCAL_CPUS = 1
LOCAL_GPUS = 2
LOCAL_IOS = 3
LOCAL_OMPS = 4
LOCAL_PYS = 5
GLOBAL_CPUS = 6
GLOBAL_GPUS = 7
GLOBAL_IOS = 8
GLOBAL_OMPS = 9
GLOBAL_PYS = 10
@staticmethod
def select(tunable_id):
result = c.legion_runtime_select_tunable_value(
_my.ctx.runtime, _my.ctx.context, tunable_id, 0, 0)
future = Future.from_cdata(result, value_type=uint64)
c.legion_future_destroy(result)
return future
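# Hypothetical usage sketch (inside a Legion task):
#   num_pys = Tunable.select(Tunable.GLOBAL_PYS).get()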
def execute_as_script():
args = input_args(True)
if len(args) < 1:
return False, False # no idea what's going on here, just return
if os.path.basename(args[0]) != 'legion_python':
return False, False # not in legion_python
if len(args) < 2 or args[1].startswith('-'):
return True, False # argument is a flag
# If it has an extension, we're going to guess that it was
# intended to be a script.
return True, len(os.path.splitext(args[1])[1]) > 1
is_legion_python, is_script = execute_as_script()
if is_script:
# We can't use runpy for this since runpy is aggressive about
# cleaning up after itself and removes the module before execution
# has completed.
def run_path(filename, run_name=None):
import imp
module = imp.new_module(run_name)
setattr(module, '__name__', run_name)
setattr(module, '__file__', filename)
setattr(module, '__loader__', None)
setattr(module, '__package__', run_name.rpartition('.')[0])
assert run_name not in sys.modules
sys.modules[run_name] = module
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, module.__dict__)
@task(top_level=True, replicable=True)
def legion_main():
args = input_args(True)
assert len(args) >= 2
sys.argv = list(args)
run_path(args[1], run_name='__legion_main__')
elif is_legion_python:
print('WARNING: Executing Python modules via legion_python has been deprecated.')
print('It is now recommended to run the script directly by passing the path')
print('to legion_python.')
print()

# =============================================================================
# File: utils.py
# Repo: ruTESTER/pminer (MIT)
# =============================================================================
import math
from geopy import distance, Point
import config
def get_map_center():
"""Returns center of the map"""
lat = (config.MAP_END[0] + config.MAP_START[0]) / 2
lon = (config.MAP_END[1] + config.MAP_START[1]) / 2
return lat, lon
def get_scan_area():
"""Returns the square kilometers for configured scan area"""
lat1 = config.MAP_START[0]
lat2 = config.MAP_END[0]
lon1 = config.MAP_START[1]
lon2 = config.MAP_END[1]
p1 = Point(lat1, lon1)
p2 = Point(lat1, lon2)
p3 = Point(lat1, lon1)
p4 = Point(lat2, lon1)
width = distance.distance(p1, p2).kilometers
height = distance.distance(p3, p4).kilometers
area = int(width * height)
return area
def get_start_coords(worker_no):
"""Returns center of square for given worker"""
grid = config.GRID
total_workers = grid[0] * grid[1]
per_column = int(total_workers / grid[0])
column = worker_no % per_column
row = int(worker_no / per_column)
part_lat = (config.MAP_END[0] - config.MAP_START[0]) / float(grid[0])
part_lon = (config.MAP_END[1] - config.MAP_START[1]) / float(grid[1])
start_lat = config.MAP_START[0] + part_lat * row + part_lat / 2
start_lon = config.MAP_START[1] + part_lon * column + part_lon / 2
return start_lat, start_lon
def float_range(start, end, step):
"""xrange for floats, also capable of iterating backwards"""
if start > end:
while end < start:
yield start
start += -step
else:
while start < end:
yield start
start += step
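# e.g. list(float_range(0, 1, 0.25)) -> [0, 0.25, 0.5, 0.75]
# (floating point rounding may perturb the exact values)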
def get_gains():
"""Returns lat and lon gain
Gain is space between circles.
"""
start = Point(*get_map_center())
base = config.SCAN_RADIUS * math.sqrt(3)
height = base * math.sqrt(3) / 2
dis_a = distance.VincentyDistance(meters=base)
dis_h = distance.VincentyDistance(meters=height)
lon_gain = dis_a.destination(point=start, bearing=90).longitude
lat_gain = dis_h.destination(point=start, bearing=0).latitude
return abs(start.latitude - lat_gain), abs(start.longitude - lon_gain)
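# Illustrative numbers (assuming config.SCAN_RADIUS = 70 meters): circle
# centers within a row sit base = 70 * sqrt(3) ~ 121.2 m apart, and rows
# are height = base * sqrt(3) / 2 ~ 105.0 m apart.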
def get_points_per_worker():
"""Returns all points that should be visited for whole grid"""
total_workers = config.GRID[0] * config.GRID[1]
lat_gain, lon_gain = get_gains()
points = [[] for _ in range(total_workers)]
total_rows = math.ceil(
abs(config.MAP_START[0] - config.MAP_END[0]) / lat_gain
)
total_columns = math.ceil(
abs(config.MAP_START[1] - config.MAP_END[1]) / lon_gain
)
for map_row, lat in enumerate(
float_range(config.MAP_START[0], config.MAP_END[0], lat_gain)
):
row_start_lon = config.MAP_START[1]
odd = map_row % 2 != 0
if odd:
row_start_lon -= 0.5 * lon_gain
for map_col, lon in enumerate(
float_range(row_start_lon, config.MAP_END[1], lon_gain)
):
# Figure out which worker this should go to
grid_row = int(map_row / float(total_rows) * config.GRID[0])
grid_col = int(map_col / float(total_columns) * config.GRID[1])
if map_col >= total_columns: # should happen only once per 2 rows
grid_col -= 1
worker_no = grid_row * config.GRID[1] + grid_col
points[worker_no].append((lat, lon))
points = [
sort_points_for_worker(p, i)
for i, p in enumerate(points)
]
return points
def sort_points_for_worker(points, worker_no):
center = get_start_coords(worker_no)
return sorted(points, key=lambda p: get_distance(p, center))
def get_distance(p1, p2):
return math.sqrt(pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 2))

# =============================================================================
# File: src/django_clickhouse/engines.py
# Repo: amrhgh/django-clickhouse (MIT)
# =============================================================================
"""
This file contains wrappers for infi.clickhouse_orm engines to use in django-clickhouse
"""
import datetime
import logging
from typing import List, Type, Union, Iterable, Optional, Tuple, NamedTuple
from django.db.models import Model as DjangoModel
from infi.clickhouse_orm import engines as infi_engines
from statsd.defaults.django import statsd
from .clickhouse_models import ClickHouseModel
from .configuration import config
from .database import connections
from .utils import format_datetime
logger = logging.getLogger('django-clickhouse')
class InsertOnlyEngineMixin:
def get_insert_batch(self, model_cls: Type[ClickHouseModel], objects: List[DjangoModel]) -> Iterable[tuple]:
"""
Gets a list of model_cls instances to insert into database
:param model_cls: ClickHouseModel subclass to import
:param objects: A list of django Model instances to sync
:return: A generator of model_cls named tuples
"""
serializer = model_cls.get_django_model_serializer(writable=True)
return (serializer.serialize(obj) for obj in objects)
class MergeTree(InsertOnlyEngineMixin, infi_engines.MergeTree):
pass
class ReplacingMergeTree(InsertOnlyEngineMixin, infi_engines.ReplacingMergeTree):
pass
class SummingMergeTree(InsertOnlyEngineMixin, infi_engines.SummingMergeTree):
pass
class CollapsingMergeTree(InsertOnlyEngineMixin, infi_engines.CollapsingMergeTree):
pk_column = 'id'
def __init__(self, *args, **kwargs):
self.version_col = kwargs.pop('version_col', None)
super(CollapsingMergeTree, self).__init__(*args, **kwargs)
    def _get_final_versions_by_version(self, db_alias: str, model_cls: Type[ClickHouseModel], object_pks: Iterable[str],
                                       columns: Iterable[str], date_range_filter: str = '') -> List[NamedTuple]:
"""
Performs request to ClickHouse in order to fetch latest version for each object pk
:param db_alias: ClickHouse database alias used
:param model_cls: Model class for which data is fetched
:param object_pks: Objects primary keys to filter by
:param columns: Columns to fetch
:param date_range_filter: Optional date_range_filter which speeds up query if date_col is set
:return: List of named tuples with requested columns
"""
if date_range_filter:
date_range_filter = 'PREWHERE {}'.format(date_range_filter)
query = """
SELECT {columns}
FROM $table
{date_range_filter}
WHERE `{pk_column}` IN ({object_pks})
ORDER BY `{pk_column}`, `{version_col}` DESC
LIMIT 1 BY `{pk_column}`
""".format(columns=','.join(columns), version_col=self.version_col, pk_column=self.pk_column,
date_range_filter=date_range_filter, object_pks=','.join(object_pks), sign_col=self.sign_col)
return connections[db_alias].select_tuples(query, model_cls)
    def _get_final_versions_by_final(self, db_alias: str, model_cls: Type[ClickHouseModel], object_pks: Iterable[str],
                                     columns: Iterable[str], date_range_filter: str = '') -> List[NamedTuple]:
"""
Performs request to ClickHouse in order to fetch latest version for each object pk
:param db_alias: ClickHouse database alias used
:param model_cls: Model class for which data is fetched
:param object_pks: Objects primary keys to filter by
:param columns: Columns to fetch
:param date_range_filter: Optional date_range_filter which speeds up query if date_col is set
:return: List of named tuples with requested columns
"""
if date_range_filter:
date_range_filter += ' AND'
query = """
SELECT {columns} FROM $table FINAL
WHERE {date_range_filter} `{pk_column}` IN ({object_pks})
"""
query = query.format(columns=','.join(columns), pk_column=self.pk_column, date_range_filter=date_range_filter,
object_pks=','.join(object_pks))
return connections[db_alias].select_tuples(query, model_cls)
def _get_date_rate_filter(self, objects, model_cls: Type[ClickHouseModel], db_alias: str,
date_col: Optional[str]) -> str:
"""
Generates datetime filter to speed up final queries, if date_col is present
:param objects: Objects, which are inserted
:param model_cls: Model class for which data is fetched
:param db_alias: ClickHouse database alias used
:param date_col: Optional column name, where partition date is hold. Defaults to self.date_col
:return: String to add to WHERE or PREWHERE query section
"""
def _dt_to_str(dt: Union[datetime.date, datetime.datetime]) -> str:
if isinstance(dt, datetime.datetime):
return format_datetime(dt, 0, db_alias=db_alias)
elif isinstance(dt, datetime.date):
return dt.isoformat()
else:
raise Exception('Invalid date or datetime object: `%s`' % dt)
date_col = date_col or self.date_col
if not date_col:
logger.warning('django-clickhouse: date_col is not provided for model %s.'
' This can cause significant performance problems while fetching data.'
' It is worth inheriting CollapsingMergeTree engine with custom get_final_versions() method,'
' based on your partition_key' % model_cls)
return ''
min_date, max_date = None, None
for obj in objects:
obj_date = getattr(obj, date_col)
if min_date is None or min_date > obj_date:
min_date = obj_date
if max_date is None or max_date < obj_date:
max_date = obj_date
min_date = _dt_to_str(min_date)
max_date = _dt_to_str(max_date)
return "`{date_col}` >= '{min_date}' AND `{date_col}` <= '{max_date}'".\
format(min_date=min_date, max_date=max_date, date_col=date_col)
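    # For example (a sketch, assuming date_col='created' and objects spanning
    # January 2018), the returned filter string would look like:
    #   `created` >= '2018-01-01' AND `created` <= '2018-01-31'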
def get_final_versions(self, model_cls: Type[ClickHouseModel], objects: Iterable[DjangoModel],
date_col: Optional[str] = None) -> Iterable[tuple]:
"""
Get objects, that are currently stored in ClickHouse.
Depending on the partition key this can be different for different models.
        In the common case, this method is optimized for a date field that doesn't change.
        It also assumes the primary key to be self.pk_column
:param model_cls: ClickHouseModel subclass to import
:param objects: Objects for which final versions are searched
:param date_col: Optional column name, where partition date is hold. Defaults to self.date_col
:return: A generator of named tuples, representing previous state
"""
        if not objects:
            return []
object_pks = [str(getattr(obj, self.pk_column)) for obj in objects]
db_alias = model_cls.get_database_alias()
date_range_filter = self._get_date_rate_filter(objects, model_cls, db_alias, date_col)
# Get fields. Sign is replaced to negative for further processing
columns = list(model_cls.fields(writable=True).keys())
columns.remove(self.sign_col)
columns.append('-1 AS sign')
params = (db_alias, model_cls, object_pks, columns, date_range_filter)
if self.version_col:
return self._get_final_versions_by_version(*params)
else:
return self._get_final_versions_by_final(*params)
def get_insert_batch(self, model_cls: Type[ClickHouseModel], objects: List[DjangoModel]) -> Iterable[tuple]:
"""
Gets a list of model_cls instances to insert into database
:param model_cls: ClickHouseModel subclass to import
:param objects: A list of django Model instances to sync
:return: A list of model_cls objects
"""
defaults = {self.sign_col: 1}
if self.version_col:
defaults[self.version_col] = 1
serializer = model_cls.get_django_model_serializer(writable=True, defaults=defaults)
new_objs = [serializer.serialize(obj) for obj in objects]
statsd_key = "%s.sync.%s.steps.get_final_versions" % (config.STATSD_PREFIX, model_cls.__name__)
with statsd.timer(statsd_key):
            # NOTE The generator pattern is deliberately not used here: it would move
            # all the time into the insert step, which makes it hard to see where
            # the real problem is when monitoring
old_objs = tuple(self.get_final_versions(model_cls, new_objs))
        # -1 sign has been set by get_final_versions()
old_objs_versions = {}
for obj in old_objs:
pk = getattr(obj, self.pk_column)
if self.version_col:
old_objs_versions[pk] = getattr(obj, self.version_col)
yield obj
# 1 sign is set by default in serializer
for obj in new_objs:
pk = getattr(obj, self.pk_column)
if self.version_col:
obj = obj._replace(**{self.version_col: old_objs_versions.get(pk, 0) + 1})
yield obj
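# --- Hedged usage sketch (illustration, not part of the library) -------------
# A minimal, self-contained model of the collapsing sync above: previous rows
# are re-emitted with sign=-1 and new rows with sign=+1 and a bumped version,
# so a (Versioned)CollapsingMergeTree table can fold them. Names below are
# hypothetical.
from collections import namedtuple

_Row = namedtuple('_Row', ['id', 'value', 'sign', 'version'])

def _collapse_batch(old_rows, new_rows):
    # old_rows stand in for get_final_versions() output (sign already -1)
    versions = {row.id: row.version for row in old_rows}
    for row in old_rows:
        yield row
    for row in new_rows:
        # sign=1 mirrors the serializer default; version continues the chain
        yield row._replace(sign=1, version=versions.get(row.id, 0) + 1)

if __name__ == '__main__':
    old = [_Row(1, 'a', -1, 3)]
    new = [_Row(1, 'b', 1, 1), _Row(2, 'c', 1, 1)]
    for row in _collapse_batch(old, new):
        print(row)
    # prints the -1 row for id=1, then id=1 with version 4, then id=2 with version 1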
| 44.352381
| 120
| 0.661048
|
f098310b2af93608d2178b9478a4b913a0f0bad8
| 6,736
|
py
|
Python
|
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/macpath.py
|
ChisdealHD/DetlasWorldLinux
|
336465a4df1a48c9a273329fc7a09d8099c4e4d5
|
[
"MIT"
] | 8
|
2016-11-24T09:38:31.000Z
|
2021-04-23T13:04:48.000Z
|
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/macpath.py
|
ChisdealHD/DetlasWorldLinux
|
336465a4df1a48c9a273329fc7a09d8099c4e4d5
|
[
"MIT"
] | 4
|
2018-02-22T07:42:13.000Z
|
2021-12-13T10:53:09.000Z
|
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/macpath.py
|
ChisdealHD/DetlasWorldLinux
|
336465a4df1a48c9a273329fc7a09d8099c4e4d5
|
[
"MIT"
] | 4
|
2015-09-09T11:54:37.000Z
|
2018-05-26T05:08:14.000Z
|
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"realpath"]
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
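# Hedged examples (illustrative):
#   join('MacHD:Docs', 'file')  -> 'MacHD:Docs:file'
#   join('foo', 'bar')          -> ':foo:bar'   (the result stays relative)
#   join(':a:', ':b')           -> ':a:b'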
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_ATIME]
def islink(s):
"""Return true if the pathname refers to a symbolic link.
    (Always false on the Mac, until we understand Aliases.)"""
return 0
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
def exists(s):
"""Return true if the pathname refers to an existing file or directory."""
try:
st = os.stat(s)
except os.error:
return 0
return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
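# --- Hedged demo (appended for illustration; not in the original module) -----
if __name__ == '__main__':
    # Classic Mac OS paths use ':' as the separator; a leading ':' marks a
    # relative path, and a bare name with no ':' at all is also relative.
    print(isabs('MacHD:Documents'))        # True
    print(isabs(':Documents:file'))        # False (leading colon -> relative)
    print(join('MacHD:Docs', 'file'))      # 'MacHD:Docs:file'
    print(split('MacHD:Docs:file'))        # ('MacHD:Docs', 'file')
    print(splitext('MacHD:notes.txt'))     # ('MacHD:notes', '.txt')
    print(normpath('MacHD:Docs::file'))    # 'MacHD:file' ('::' pops one level)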
| 27.720165
| 79
| 0.592933
|
f5cc659ead9e06da9d8626745939edb43f90c1bd
| 1,021
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/request_report_record_contract_paged.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/request_report_record_contract_paged.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/request_report_record_contract_paged.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RequestReportRecordContractPaged(Paged):
"""
    A paging container for iterating over a list of :class:`RequestReportRecordContract <azure.mgmt.apimanagement.models.RequestReportRecordContract>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[RequestReportRecordContract]'}
}
def __init__(self, *args, **kwargs):
super(RequestReportRecordContractPaged, self).__init__(*args, **kwargs)
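# --- Hedged usage note (illustrative; client construction is assumed) --------
# Paged containers are iterables that lazily follow `nextLink`. A typical
# pattern with the service client looks like:
#
#     from azure.mgmt.apimanagement import ApiManagementClient
#     client = ApiManagementClient(credentials, subscription_id)
#     for record in client.reports.list_by_request(resource_group, service_name,
#                                                  filter=report_filter):
#         print(record.api_id, record.response_code)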
| 36.464286
| 157
| 0.610186
|
7c057ad5ca28d6c3b7ddc1a8b3f652749e92bd74
| 6,081
|
py
|
Python
|
components/isceobj/IsceProc/runCorrect.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,133
|
2022-01-07T21:24:57.000Z
|
2022-01-07T21:33:08.000Z
|
components/isceobj/IsceProc/runCorrect.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 276
|
2019-02-10T07:18:28.000Z
|
2022-03-31T21:45:55.000Z
|
components/isceobj/IsceProc/runCorrect.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 235
|
2019-02-10T05:00:53.000Z
|
2022-03-18T07:37:24.000Z
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Authors: Kosal Khun, Marco Lavalle
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Comment: Adapted from InsarProc/runCorrect.py
import logging
import isceobj
import stdproc
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
import os
logger = logging.getLogger('isce.isce.runCorrect')
def runCorrect(self):
refScene = self._isce.refScene
velocity, height = self._isce.vh()
infos = {}
for attribute in ['dopplerCentroid', 'peg', 'lookSide', 'numberRangeLooks', 'numberAzimuthLooks', 'topophaseMphFilename', 'topophaseFlatFilename', 'heightSchFilename', 'is_mocomp']:
infos[attribute] = getattr(self._isce, attribute)
infos['refOutputPath'] = os.path.join(self.getoutputdir(refScene), refScene)
stdWriter = self._stdWriter
refScene = self._isce.refScene
refPol = self._isce.refPol
refPair = self._isce.selectedPairs[0]#ML 2014-09-26
topoIntImage = self._isce.topoIntImages[refPair][refPol]
for sceneid1, sceneid2 in self._isce.selectedPairs:
pair = (sceneid1, sceneid2)
objMocompbaseline = self._isce.mocompBaselines[pair]
for pol in self._isce.selectedPols:
frame1 = self._isce.frames[sceneid1][pol]
objFormSLC1 = self._isce.formSLCs[sceneid1][pol]
topoIntImage = self._isce.topoIntImages[pair][pol] #ML 2014-09-26
intImage = isceobj.createIntImage()
IU.copyAttributes(topoIntImage, intImage)
intImage.setAccessMode('read')
sid = self._isce.formatname(pair, pol)
infos['outputPath'] = os.path.join(self.getoutputdir(sceneid1, sceneid2), sid)
catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
run(frame1, objFormSLC1, objMocompbaseline, intImage, velocity, height, infos, stdWriter, catalog=catalog, sceneid=sid)
def run(frame1, objFormSLC1, objMocompbaseline, intImage, velocity, height, infos, stdWriter, catalog=None, sceneid='NO_ID'):
logger.info("Running correct: %s" % sceneid)
#intImage = isceobj.createIntImage()
##just pass the image object to Correct and it will handle the creation
## and deletion of the actual image pointer
#IU.copyAttributes(topoIntImage, intImage)
posIndx = 1
mocompPosition1 = objFormSLC1.mocompPosition
centroid = infos['dopplerCentroid'].getDopplerCoefficients(inHz=False)[0]
planet = frame1.instrument.platform.planet
prf1 = frame1.instrument.PRF
objCorrect = stdproc.createCorrect()
objCorrect.wireInputPort(name='peg', object=infos['peg'])
objCorrect.wireInputPort(name='frame', object=frame1)
objCorrect.wireInputPort(name='planet', object=planet)
objCorrect.wireInputPort(name='interferogram', object=intImage)
objCorrect.wireInputPort(name='referenceslc', object=objFormSLC1) #Piyush
#objCorrect.setDopplerCentroidConstantTerm(centroid) #ML 2014-08-05
# Average velocity and height measurements
objCorrect.setBodyFixedVelocity(velocity)
objCorrect.setSpacecraftHeight(height)
# Need the reference orbit from Formslc
objCorrect.setReferenceOrbit(mocompPosition1[posIndx])
objCorrect.setMocompBaseline(objMocompbaseline.baseline)
sch12 = objMocompbaseline.getSchs()
objCorrect.setSch1(sch12[0])
objCorrect.setSch2(sch12[1])
sc = objMocompbaseline.sc
objCorrect.setSc(sc)
midpoint = objMocompbaseline.midpoint
objCorrect.setMidpoint(midpoint)
objCorrect.setLookSide(infos['lookSide'])
objCorrect.setNumberRangeLooks(infos['numberRangeLooks'])
objCorrect.setNumberAzimuthLooks(infos['numberAzimuthLooks'])
objCorrect.setTopophaseMphFilename(infos['outputPath'] + '.' + infos['topophaseMphFilename'])
objCorrect.setTopophaseFlatFilename(infos['outputPath'] + '.' + infos['topophaseFlatFilename'])
objCorrect.setHeightSchFilename(infos['refOutputPath'] + '.' + infos['heightSchFilename'])
objCorrect.setISMocomp(infos['is_mocomp'])
    # Set the tag used in the output file. Each message is preceded by this tag;
    # if the writer is not of "file" type the call has no effect
objCorrect.stdWriter = stdWriter.set_file_tags("correct",
"log",
"err",
"out")
objCorrect()#.correct()
if catalog is not None:
# Record the inputs and outputs
isceobj.Catalog.recordInputsAndOutputs(catalog, objCorrect,
"runCorrect.%s" % sceneid,
logger,
"runCorrect.%s" % sceneid)
return objCorrect
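# --- Hedged illustration (not part of ISCE) ----------------------------------
# The stdproc objects above follow a "wire input ports, then call" protocol.
# A minimal stand-in for that pattern, with hypothetical names (the `object`
# keyword intentionally mirrors the wireInputPort signature used above):
class _PortDriven(object):
    def __init__(self):
        self._ports = {}
    def wireInputPort(self, name, object=None):
        self._ports[name] = object  # stash each dependency under its port name
    def __call__(self):
        # a real stdproc object would invoke its Fortran/C core here
        print('running with ports: %s' % sorted(self._ports))

if __name__ == '__main__':
    proc = _PortDriven()
    proc.wireInputPort(name='peg', object='peg-object')
    proc.wireInputPort(name='frame', object='frame-object')
    proc()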
| 44.386861
| 185
| 0.682782
|
e30893912d95668cf97c4fdb415c1a7be9bb0777
| 51,486
|
py
|
Python
|
discord_bot_3.6.py
|
x3l51/discord_bot
|
15bc2384870370390ef4294c21de93ae6e6c88a2
|
[
"MIT"
] | 6
|
2018-08-15T22:54:11.000Z
|
2021-03-19T12:43:04.000Z
|
discord_bot_3.6.py
|
x3l51/discord_bot
|
15bc2384870370390ef4294c21de93ae6e6c88a2
|
[
"MIT"
] | 21
|
2018-08-15T11:25:19.000Z
|
2019-12-20T09:15:07.000Z
|
discord_bot_3.6.py
|
x3l51/discord_bot
|
15bc2384870370390ef4294c21de93ae6e6c88a2
|
[
"MIT"
] | 3
|
2018-08-15T17:39:09.000Z
|
2019-01-20T10:54:20.000Z
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# bot_bcad_3.6.py
import os
import sys
import re
import ast
import asyncio
import datetime
import json
import random
import time
import logging
from datetime import datetime
from datetime import timedelta
from xml.etree import ElementTree
import aiohttp
import async_timeout
import discord
import praw
import pytz
import wikipedia
import requests
import youtube_dl
from discord.ext import commands
from pytz import timezone
from wiktionaryparser import WiktionaryParser
import memberList
import messages
import utilities
with open('config.json', 'r') as json_file:
data = json.load(json_file)
for p in data['TOKEN']:
TOKEN = p['value']
for p in data['REDDIT']:
json_client_id = p['client_id']
json_client_secret = p['client_secret']
json_user_agent = p['user_agent']
for p in data['GOODREADS']:
goodreads_key = p['goodreads_key']
for p in data ['PREFIX']:
prefix_choice = p['prefix']
for p in data ['COUNTER']:
reaction_trigger_pull = p['counter_reac']
cmd_trigger_pull = p['counter_cmd']
for p in data ['UPTIME']:
uptime_pull = p['uptime']
for p in data ['BOT_OWNER_ID']:
bot_owner_id = p['bot_owner_id']
for p in data ['STATUS']:
status_pull = p['status']
description = 'Sir Henry Pickles, the pickly Bot!'
bot = commands.Bot(max_messages=10000, command_prefix=commands.when_mentioned_or(prefix_choice), case_insensitive=True)
reddit = praw.Reddit(client_id=json_client_id, client_secret=json_client_secret, user_agent=json_user_agent)
start_time = time.time()
bot.remove_command('help')
random.seed(a=None)
logger_discord_info = logging.getLogger('discord')
logger_discord_info.setLevel(logging.DEBUG)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
handler_discord_info = logging.FileHandler(filename='./logs/discord_info.log', encoding='utf-8', mode='a+')
handler_discord_info.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger_discord_info.addHandler(handler_discord_info)
async def fetch(session, url):
with async_timeout.timeout(10):
async with session.get(url) as response:
return await response.text()
async def create_chan_log():
for guild in bot.guilds:
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
if not discord.utils.get(guild.channels, name='logs'):
await guild.create_text_channel('logs', overwrites=overwrites, position=1, topic='logs')
async def create_chan_suggestions():
for guild in bot.guilds:
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
if not discord.utils.get(guild.channels, name='suggestions'):
await guild.create_text_channel('suggestions', overwrites=overwrites, position=2, topic='suggestions')
@bot.event
async def on_connect():
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Connected to discord\n\
------\
')
print(usage_string)
@bot.event
async def on_disconnect():
total_uptime_save()
reaction_trigger_save()
cmd_trigger_save()
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Lost connection to discord\n\
------\
')
print(usage_string)
try:
await bot.connect(reconnect=True)
except Exception as e:
print(e)
reloadRaw()
@bot.event
async def on_error(event, *args, **kwargs):
total_uptime_save()
reaction_trigger_save()
cmd_trigger_save()
import traceback
    message = args[0]
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {message.author.name}\n\
Author Name: {message.author}\n\
Author ID: {message.author.id}\n\
Channel: {message.channel}\n\
Server: {message.guild}\n\
Server ID: {message.guild.id}\n\
\n\
ON ERROR\n\
\n\
ERROR EVENT: {event}\n\
ERROR ARGS: {args}\n\
ERROR TRACEBACK: {traceback.format_exc()}\n\
COMPLETE MESSAGE: {message.content}\n\
------\
')
print(usage_string)
for channel in message.guild.channels:
if channel.name == 'logs':
await channel.send(embed=discord.Embed(title="ERROR", color=0xff0000)
.add_field(name="EVENT", value=event, inline = False)
.add_field(name="ARGS", value=args, inline = False)
.add_field(name="TRACEBACK", value=traceback.format_exc(), inline = False)
.set_thumbnail(url=message.author.avatar_url))
return await channel.send("<@!"+bot_owner_id+">")
@bot.event
async def on_ready():
boot = discord.Game(name="BOOTING")
boot_fin = discord.Game(name="BOOT FINISHED")
err = discord.Game(name="EXIT CODE 0")
stat = discord.Game(name=status_pull)
print(f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Logged in as\n\
Bot Name: {bot.user.name}\n\
Bot ID: {bot.user.id}\n\
------\
')
await create_chan_log()
await create_chan_suggestions()
await bot.change_presence(status=discord.Status.idle, activity=boot)
await asyncio.sleep(3)
await bot.change_presence(status=discord.Status.idle, activity=boot_fin)
await asyncio.sleep(3)
await bot.change_presence(status=discord.Status.online, activity=err)
await asyncio.sleep(3)
await bot.change_presence(status=discord.Status.online, activity=stat)
@bot.event
async def on_member_remove(member):
print("on member remove")
@bot.event
async def on_member_ban(guild, user):
print("on member ban")
@bot.event
async def on_member_unban(guild, user):
print("on member unban")
@bot.event
async def on_member_join(member):
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {member.name}\n\
Author Name: {member}\n\
Author ID: {member.id}\n\
    Newly joined this server\n\
Server: {member.guild}\n\
Server ID: {member.guild.id}\n\
------\
')
print(usage_string)
for channel in member.guild.channels:
if channel.name == 'general':
with open('config.json', 'r') as json_file:
data = json.load(json_file)
for p in data ['PREFIX']:
prefix_choice = p['prefix']
welm = (f"Welcome to ")
delm = (f"`{member.guild}`!")
desm = (f'Enjoy the guild.')
vasm = (f'Type `{prefix_choice}help` to learn all my commands.\n Now go and have some fun, {member.mention} <3')
await channel.send(embed=discord.Embed(title=welm, description=delm, color=0x28e778)
.add_field(name=desm, value=vasm, inline = False)
.set_thumbnail(url=member.avatar_url))
for channel in member.guild.channels:
if channel.name == 'logs':
welm = (f"Welcome to `{member.guild}`!")
desm = (f'ONE OF US!\n A warm welcome to: {member.mention} <3')
return await channel.send(embed=discord.Embed(title=welm, description=desm, color=0x28e778)
.set_thumbnail(url=member.avatar_url))
@bot.event
async def on_guild_join(guild):
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
HENRY JUST JOINED A NEW SERVER\n\
Server: {guild}\n\
Server ID: {guild.id}\n\
------\
')
print(usage_string)
msg = (f'Sir Henry just joined the Server: `{guild}` `(Server ID: {guild.id})`')
    user = None
    for guild in bot.guilds:
        # member IDs are ints in discord.py 1.x, so a string never matches
        user = discord.utils.get(guild.members, id=410406332143763466)
        if user is not None:
            break
    if user is not None:
        await user.send(msg)  # `message` was undefined in this event handler
@bot.event
async def on_message_delete(message):
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {message.author.name}\n\
Author Name: {message.author}\n\
Author ID: {message.author.id}\n\
Event: Deleted message\n\
Full Message: {message.content}\n\
Channel: {message.channel}\n\
Server: {message.guild}\n\
Server ID: {message.guild.id}\n\
------\
')
print(usage_string)
try:
for channel in message.guild.channels:
if channel.name == 'logs':
auth = (f'<@!{message.author.id}> ({message.author})')
chan = (f'#{message.channel}')
return await channel.send(embed=discord.Embed(title="Message deleted", color=0xeee657)
.add_field(name="Channel", value=chan, inline = False)
.add_field(name="Message Author", value=auth, inline = False)
.add_field(name="Message Author ID", value=message.author.id, inline = False)
.add_field(name="Message ID", value=message.id, inline = False)
.add_field(name="Message", value=message.content, inline = False)
.set_thumbnail(url=message.author.avatar_url)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
except:
return
@bot.event
async def on_message_edit(before, after):
if before.author != bot.user:
if before.content != after.content:
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {before.author.name}\n\
Author Name: {before.author}\n\
Author ID: {before.author.id}\n\
    Edited a message\n\
Channel: {before.channel}\n\
Server: {before.channel.guild}\n\
Server ID: {before.channel.guild.id}\n\
------\
')
print(usage_string)
try:
for channel in before.guild.channels:
if channel.name == 'logs':
auth = (f'<@!{before.author.id}> ({before.author})')
chan = (f'#{before.channel}')
return await channel.send(embed=discord.Embed(title="Message edited", color=0xeee657)
.add_field(name="Channel", value=chan, inline = False)
.add_field(name="Message Author", value=auth, inline = False)
.add_field(name="Message Author ID", value=before.author.id, inline = False)
.add_field(name="Message ID", value=after.id, inline = False)
.add_field(name="Message before", value=before.content, inline = False)
.add_field(name="Message after", value=after.content, inline = False)
.set_thumbnail(url=after.author.avatar_url)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
except:
return
@bot.event
async def on_guild_channel_create(channel):
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
A channel was created\n\
Channel: {channel.name}\n\
Server: {channel.guild}\n\
Server ID: {channel.guild.id}\n\
------\
')
print(usage_string)
for channels in channel.guild.channels:
if channels.name == 'logs':
return await channels.send(embed=discord.Embed(title="Channel creation", color=0xeee657)
.add_field(name="Channel", value=channel.name, inline = False)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
@bot.event
async def on_guild_channel_delete(channel):
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
A channel was deleted\n\
Channel: {channel.name}\n\
Server: {channel.guild}\n\
Server ID: {channel.guild.id}\n\
------\
')
print(usage_string)
for channels in channel.guild.channels:
if channels.name == 'logs':
return await channels.send(embed=discord.Embed(title="Channel deletion", color=0xeee657)
.add_field(name="Channel", value=channel.name, inline = False)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
@bot.event
async def on_guild_channel_update(before, after):
print("on guild channel change")
@bot.event
async def on_guild_channel_pins_update(channel, last_pin):
print("on guild channel pin update")
@bot.event
async def on_command_error(ctx, error):
total_uptime_save()
reaction_trigger_save()
cmd_trigger_save()
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {ctx.message.author.name}\n\
Author Name: {ctx.message.author}\n\
Author ID: {ctx.message.author.id}\n\
Channel: {ctx.message.channel}\n\
Server: {ctx.message.guild}\n\
Server ID: {ctx.message.guild.id}\n\
\n\
ON COMMAND ERROR\n\
')
print(usage_string)
try:
error = getattr(error, 'original', error)
error_message = (f'\
ERROR MESSAGE: {error}\n\
COMPLETE MESSAGE: {ctx.message.content}\n\
------\
')
print(error_message)
list = ["What now, ", "There's no such command, ", "Need help, ", "Thou shalt not speak the language of the peasants, "]
return await ctx.send(random.choice(list) + ctx.author.mention + "?")
except RuntimeError as err:
print(f'\
ERROR MESSAGE: {err}\n\
------\
')
@bot.command()#
async def youtube(ctx, url=""):
pass
# TODO
# @bot.command()#
# async def youtube(ctx, keyword_raw, url=""):
# usage_string = (f'[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}] ID: {ctx.message.author.id} (Name: {ctx.message.author.name}) used `{ctx.command.name}` in channel: {ctx.message.channel} (Server: {ctx.message.guild})')
# print(usage_string)
# print('------')
# cmd_trigger()
# joining = (f'Okay, I\'m here. What now?')
# leaving = (f'Okay, okay, I\'ll leave! Jeez, calm down.')
# stopping = (f'Stopped playing.')
# pausing = (f'Paused playing.')
# resuming = (f'Resumed playing.')
# volume = (f'Changing volume to {url}%.')
# channel = ctx.message.author.voice.voice_channel
# voice = bot.join_voice_channel(channel)
# try:
# if keyword_raw == "join":
# await ctx.send(joining)
# await voice
# if keyword_raw == "leave":
# await ctx.send(leaving)
# for x in bot.voice_clients:
# if(x.guild == ctx.message.guild):
# return await x.disconnect()
# # if a song is already playing nothing happens
# if keyword_raw == "play":
# if url is "":
# return ctx.send("You got to give me a YouTube URL, stupid! `!youtube play URL_HERE`")
# voice = await bot.join_voice_channel(channel)
# global player
# player = await voice.create_ytdl_player(url)
# player.start()
# playing = (f'`Now playing {player.title}!`')
# await ctx.send(playing)
# return player
# if keyword_raw == "stop":
# await ctx.send(stopping)
# player.stop()
# if keyword_raw == "pause":
# await ctx.send(pausing)
# player.pause()
# if keyword_raw == "resume":
# await ctx.send(resuming)
# player.resume()
# if keyword_raw == "volume":
# set_vol = (int(url)/100)
# if float(set_vol) <= 0:
# return await ctx.send("You can\'t do that, silly.")
# elif float(set_vol) > 1:
# return await ctx.send("You can\'t do that, silly.")
# else:
# await ctx.send(volume)
# player.volume = set_vol
# except:
# return await ctx.send("Whoops, " + ctx.message.author.mention + "! `!" + ctx.command.name + " " + keyword_raw + "` didn\'t work this time.\nI\'m probably already playing something or idk.\nProbably I broke.")
# if keyword_raw == "info":
# if player.is_playing():
# return await ctx.send(embed=discord.Embed(title="Info on Youtube Player", color=0xeee657)
# .add_field(name="Title", value=player.title, inline = False)
# .add_field(name="Description", value=player.description, inline = False)
# .add_field(name="Length", value=player.duration, inline = False)
# .add_field(name="URL", value=player.url, inline = False)
# .set_thumbnail(url=ctx.message.author.avatar_url)
# .set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
# else:
# return await ctx.send("There is nothing playing, silly!")
@bot.command()
async def say(ctx, serv, chan, *mes_raw):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
        # commands.is_owner() returns a check object (always truthy); the
        # coroutine bot.is_owner() performs the actual ownership test
        if await bot.is_owner(ctx.message.author):
            mes = ' '.join(mes_raw)
            for guild in bot.guilds:
                if guild.name == serv:
                    channel = discord.utils.get(guild.channels, name=chan)
                    if channel is not None:
                        # send to the resolved target channel, not the invoking one
                        return await channel.send(mes)
@bot.command()
async def leave(ctx, ID):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
        try:
            if await bot.is_owner(ctx.message.author):
                serv = bot.get_guild(int(ID))  # discord.py 1.x replaces get_server()
                msg = (f'Sir Henry just left the Server: `{serv}` `(Server ID: {serv.id})`')
                user = None
                await serv.leave()  # discord.py 1.x replaces bot.leave_server()
                for guild in bot.guilds:
                    user = discord.utils.get(guild.members, id=int(bot_owner_id))
                    if user is not None:
                        break
                if user is not None:
                    await user.send(msg)  # `message` was undefined here before
except:
await ctx.send("`Something did go wrong. Please read the log.`")
else:
embed = discord.Embed(title="Permission", description=(ctx.message.author.mention + ', you\'re not a mod. You can\'t use `' + ctx.command.name + '`.'),
color=0xeee657)
return await ctx.send(embed=embed)
@bot.command()
async def userinfo(ctx, member : discord.Member = None):
printCtx(ctx)
cmd_trigger()
if member is None:
member = ctx.message.author
return await ctx.send(embed=discord.Embed(title=f"{member.name}'s User Information", color=0xeee657)
.add_field(name="Name", value="<@!"+str(member.id)+">", inline = False)
.add_field(name="Discriminator", value=member.discriminator, inline = False)
.add_field(name="ID", value=member.id, inline = False)
.add_field(name="This Server's ID", value=ctx.message.guild.id, inline = False)
.add_field(name="Highest Role", value=member.top_role.name, inline = False)
.add_field(name="Avatar Url", value=member.avatar_url, inline = False)
.add_field(name="Joined Discord", value=member.created_at, inline = False)
.add_field(name="Joined Server", value=member.joined_at, inline = False)
.add_field(name="Bot", value=member.bot, inline = False)
.set_thumbnail(url=member.avatar_url)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
@bot.command()
async def suggestion(ctx):
printCtx(ctx)
cmd_trigger()
try:
for channel in ctx.message.guild.channels:
if channel.name == 'suggestions':
await channel.send(embed=discord.Embed(title="Suggestion Author", description=ctx.message.author.name, color=0xeee657)
.add_field(name="Suggestion Message", value=ctx.message.content))
return await ctx.send(embed=discord.Embed(title="Suggestion received", color=0xeee657))
except:
return
def reloadRaw():
if os.name is "nt":
os.execv(sys.executable, ['python3.6'] + sys.argv)
else:
import ctypes
argc = ctypes.c_int()
argv = ctypes.POINTER(ctypes.c_wchar_p if sys.version_info >= (3, ) else ctypes.c_char_p)()
ctypes.pythonapi.Py_GetArgcArgv(ctypes.byref(argc), ctypes.byref(argv))
os.execv(sys.executable, ['python3.6'] + [argv[1]] + sys.argv)
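        # Hedged note: Py_GetArgcArgv recovers the C-level argv the interpreter
        # was started with, so the re-exec can preserve the original invocation;
        # the simpler sys.argv path above is used on Windows.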
@bot.command()
async def reload(ctx):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
if commands.is_owner() or ctx.message.author.guild is "The Eclectic Collective":
await ctx.send("`Reloading modules. Restarting connection.`")
sd = discord.Game(name="REBOOT")
bye = discord.Game(name="BYE")
await bot.change_presence(status=discord.Status.idle, activity=sd)
await asyncio.sleep(3)
await bot.change_presence(status=discord.Status.idle, activity=bye)
await asyncio.sleep(1)
await bot.change_presence(status=discord.Status.offline)
total_uptime_save()
reaction_trigger_save()
cmd_trigger_save()
if bot.is_ready():
await ctx.send("`Values saved`")
await ctx.send("`Logout`")
reloadRaw()
else:
embed = discord.Embed(title="Notification", description=("<@!"+bot_owner_id+">, " + ctx.message.author.mention + " wants to reload the bot."),
color=0xeee657)
return await ctx.send(embed=embed)
else:
embed = discord.Embed(title="Permission", description=(ctx.message.author.mention + ', you\'re not a mod. You can\'t use this command.'),
color=0xeee657)
return await ctx.send(embed=embed)
@bot.command()
@commands.is_owner()
async def quit(ctx):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
        if await bot.is_owner(ctx.message.author):
await ctx.send("`Shutdown requested`")
sd = discord.Game(name="SHUTDOWN")
bye = discord.Game(name="BYE")
await bot.change_presence(status=discord.Status.idle, activity=sd)
await asyncio.sleep(3)
await bot.change_presence(status=discord.Status.idle, activity=bye)
total_uptime_save()
reaction_trigger_save()
cmd_trigger_save()
if bot.is_ready():
await ctx.send("`Values saved`")
await ctx.send("`Logout`")
await bot.logout()
return sys.exit(0)
else:
embed = discord.Embed(title="Notification", description=("<@!"+bot_owner_id+">, " + ctx.message.author.mention + " wants to quit the bot."),
color=0xeee657)
return await ctx.send(embed=embed)
else:
embed = discord.Embed(title="Permission", description=(ctx.message.author.mention + ', you\'re not a mod. You can\'t use this command.'),
color=0xeee657)
return await ctx.send(embed=embed)
@bot.command()
async def prefix(ctx, prefix_raw):
printCtx(ctx)
cmd_trigger()
if prefix_raw == "show":
with open('config.json', 'r') as json_file:
data = json.load(json_file)
for p in data ['PREFIX']:
prefix_choice = p['prefix']
embed = discord.Embed(description=("Actual prefix is: " + prefix_choice), color=0xeee657)
return await ctx.send(embed=embed)
else:
if ctx.message.author.guild_permissions.administrator:
            if await bot.is_owner(ctx.message.author):
with open('config.json', 'r') as json_file:
data = json.load(json_file)
data["PREFIX"][0]["prefix"] = prefix_raw
with open('config.json', 'w') as outfile:
json.dump(data, outfile)
statusMsg = (f'{prefix_raw}help')
status_pref = discord.Game(name=statusMsg)
saveStatus(statusMsg)
await bot.change_presence(status=discord.Status.online, activity=status_pref)
bot.command_prefix = commands.when_mentioned_or(prefix_raw)
return await ctx.send(embed=discord.Embed( description=("Prefix successfully set to: " + prefix_raw), color=0xeee657))
else:
return await ctx.send(embed=discord.Embed(
description=("<@!"+bot_owner_id+">, " + ctx.message.author.mention + " wants to have the prefix changed to " + prefix_raw + "."),
color=0xeee657))
else:
embed = discord.Embed(description=(ctx.message.author.mention + ', you\'re not a mod. You can\'t use this command.'),
color=0xeee657)
return await ctx.send(embed=embed)
@bot.command()
async def status(ctx, *status_raw):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
status_arg = ' '.join(status_raw)
activity_stat = discord.Game(name=status_arg)
saveStatus(status_arg)
# data["STATUS"][0]["status"] = status_arg
# with open('config.json', 'w') as outfile:
# json.dump(data, outfile)
await bot.change_presence(status=discord.Status.online, activity=(activity_stat))
return await ctx.send(embed=discord.Embed(title="Status changed to: ", description=("@Sir Henry Pickles playing " + status_arg), color=0xeee657))
else:
return await ctx.send(ctx.message.author.mention + ', you\'re not a mod. You can\'t use this command.')
def saveStatus(status_arg):
with open('config.json', 'r') as json_file:
data = json.load(json_file)
data["STATUS"][0]["status"] = status_arg
with open('config.json', 'w') as outfile:
json.dump(data, outfile)
@bot.command()
async def members(ctx):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
log_path = await memberList.membersLog(ctx)
for channel in ctx.message.guild.channels:
if channel.name == 'logs':
await channel.send(file=discord.File(log_path))
return await ctx.message.author.send(file=discord.File(log_path))
else:
return await ctx.send(ctx.message.author.mention + ', you\'re not a mod. You can\'t use this command.')
@bot.command()
async def members_show(ctx):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
members = await memberList.membersDump(ctx)
return await ctx.message.author.send(members)
else:
return await ctx.send(ctx.message.author.mention + ', you\'re not a mod. You can\'t use this command.')
@bot.command()
async def info(ctx):
printCtx(ctx)
cmd_trigger()
total_uptime_save()
time_lapsed = (time.time() - start_time)
total_uptime = time_lapsed + uptime_pull
link_build = (f'https://discordapp.com/oauth2/authorize?client_id={bot.user.id}&scope=bot&permissions=8')
timeDiff = timedelta(seconds=total_uptime)
years = int(int(timeDiff.days) / 365)
days = int(timeDiff.days) - (int(years) * 365)
uptimeStr = (f'Years: {years}, Days: {days}')
return await ctx.send(embed = discord.Embed(title="Sir Henry Pickles", description="Pickles are love, pickles are life!", color=0xeee657)
.set_thumbnail(url=bot.user.avatar_url)
.add_field(name="System Time:", value=utilities.epoch_to_custom_date(utilities.FMT_TIME), inline=False)
.add_field(name="Command count: ", value=cmd_trigger.Counter, inline=False)
.add_field(name="Message count: ", value=reaction_trigger.counter, inline=False)
.add_field(name="Server count: ", value=len(bot.guilds), inline=False)
.add_field(name="Uptime", value=timedelta(seconds=time_lapsed), inline=False)
.add_field(name="Total Uptime", value=uptimeStr, inline=False)
.add_field(name="GitHub Project Page:", value="https://github.com/x3l51/discord_bot", inline=False)
.add_field(name="Next features and progress on them:", value="https://github.com/x3l51/discord_bot/projects/1", inline=False)
.add_field(name="Direct invite to the Developers Discord:", value="https://discordapp.com/invite/5raBJUU", inline=False)
.add_field(name="Invite the Bot to your Discord Server:", value=link_build, inline=False)
.add_field(name="Author", value="<@!"+bot_owner_id+">")
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
@bot.command(name="time", ignore_extras=False)
async def cmd_time(ctx, *tz_keywords):
printCtx(ctx)
cmd_trigger()
tz_keyword = '_'.join(tz_keywords)
moon = ('Moon', 'moon')
moon_rep = ('Very funny, ' + ctx.message.author.mention, 'Wow, ' + ctx.message.author.mention,
'Oi, ' + ctx.message.author.mention + '! Go fork urself m8!',
'Maan, dude, idk maaan, like ... on the moon? duuuude .... DUUuUuuUuUUuDDDEeeeee. *hits blunt* idk ' + ctx.message.author.mention + ', better call the space tesla guy ..!?')
if tz_keyword in (moon):
await ctx.send("...")
await asyncio.sleep(2)
return await ctx.send(random.choice(moon_rep))
if tz_keyword is "":
tz_keyword = "GMT+0"
await ctx.send("No keyword given, so I'll give you `GMT+0`. Try `!time GMT+0` or `!time Denver` next time.")
valid_zones = []
for zone in pytz.all_timezones:
zones = zone.split('/')
region = ''
region_tz = ''
region_city = ''
if len(zones) == 1:
region = zones[0]
region_tz = ''
elif len(zones) == 2:
region, region_tz = zones[0], zones[1]
else:
region, region_tz, region_city = zones[0], zones[1], zones[2]
found = False
if region.lower().startswith(tz_keyword.lower()) and not found:
valid_zones.append('Time Zone: {} is {}'.format(zone, datetime.now(tz=timezone(zone))))
found = True
if region_tz.lower().startswith(tz_keyword.lower()) and not found:
valid_zones.append('Time Zone: {} is {}'.format(zone, datetime.now(tz=timezone(zone))))
found = True
if region_city.lower().startswith(tz_keyword.lower()) and not found:
valid_zones.append('Time Zone: {} is {}'.format(zone, datetime.now(tz=timezone(zone))))
else:
if len(valid_zones) == 0:
return await ctx.send('{} is an invalid timezone'.format(tz_keyword))
else:
msg = '\n'.join(valid_zones)
if len(msg) <= 2000:
await ctx.send(msg)
else:
current_len = 0
msg = ''
for idx, _msg in enumerate(valid_zones):
msg += '{}\n'.format(valid_zones[idx])
current_len = current_len + len(_msg)
try:
if current_len + len(valid_zones[idx + 1]) > 1950:
await ctx.send(msg)
msg = ''
current_len = 0
except IndexError:
return await ctx.send(msg)
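# Hedged standalone equivalent of the lookup above, for a single keyword:
#     matches = [z for z in pytz.all_timezones
#                if any(part.lower().startswith('denver')
#                       for part in z.split('/'))]
#     for z in matches:
#         print(z, datetime.now(tz=timezone(z)))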
@bot.command()
async def archive(ctx):
printCtx(ctx)
cmd_trigger()
for channel in ctx.message.guild.channels:
if channel.name == 'logs':
msg = (f'{ctx.message.author.mention} just created an archive of {ctx.message.channel.name}!')
await channel.send(msg)
return await log_messages(ctx)
async def log_messages(ctx):
cmd_trigger()
log_path = ("./logs/archive" + "-server-" + ctx.message.guild.name.replace(' ', '-') + "-channel-" + ctx.message.channel.name + "-" + (utilities.epoch_to_custom_date(utilities.FMT_TIME_FILE)) + ".log")
if ctx.message.author.guild_permissions.administrator:
async for m in ctx.message.channel.history(limit=None):
list_all = (f'Time (CET): {m.created_at}\nID: {m.author.id}\nName: {m.author} ({m.author.name})\nContent: {m.content}\n\n')
with open(log_path, 'a', encoding='utf-8') as file:
file.write(list_all)
for channel in ctx.message.guild.channels:
if channel.name == 'logs':
await channel.send(file=discord.File(log_path))
return await ctx.message.author.send(file=discord.File(log_path))
@bot.command()
async def clear(ctx, cle: int = 1000):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
for channel in ctx.message.guild.channels:
if channel.name == 'logs':
msg = (f'{ctx.message.author.mention} just created an archive of {ctx.message.channel.name} and cleared it!')
await channel.send(msg)
await log_messages(ctx)
if cle == 1000:
            await ctx.message.channel.purge(limit=1001, bulk=True)  # purge() defaults to 100; make "up to 1000" explicit (+1 for the command)
else:
limit=int(cle) + 1
await ctx.message.channel.purge(limit=limit, bulk=True)
cle_num = str(cle)
if cle == 1000:
num_cleared = "up to 1000 messages"
elif cle <= 0:
num_cleared = "your message because why would you want to clear " + cle_num + " messages!?"
elif cle == 1:
num_cleared = "1 message"
else:
num_cleared = str(cle) + " messages"
embed = discord.Embed(title="Channel has been cleared of " + num_cleared, color=0x00ff00)
embed.set_image(
url="https://media1.giphy.com/media/PAO4KoQ532CRi/giphy.gif")
await ctx.send(embed=embed)
return
else:
return await ctx.send(
ctx.message.author.mention + ', you have no permission to use this command.')
@bot.command()
async def test(ctx):
printCtx(ctx)
cmd_trigger()
return await ctx.send("successful")
@bot.command()
async def mod(ctx):
printCtx(ctx)
cmd_trigger()
if ctx.message.author.guild_permissions.administrator:
return await ctx.send(ctx.message.author.mention + ', you\'re a mod.')
else:
return await ctx.send(ctx.message.author.mention + ', you\'re not a mod.')
@bot.command(name='help', )
async def cmd_help(ctx):
printCtx(ctx)
cmd_trigger()
await ctx.message.author.send(
"If you are in need of immediate assistance, I kindly suggest you to call the emergency "
"services.\n "
"\n"
"----------\n"
"\n"
"**Name**: Sir Henry Pickles\n"
"**Description:** *Does his best.*\n"
)
for embed in messages.HELP_EMBEDS:
await ctx.message.author.send(embed=embed)
return await ctx.message.author.send("If you still have questions, please ping the `@Mods`")
@bot.command()
async def sleep(ctx):
printCtx(ctx)
cmd_trigger()
sleep = ['Yes, you should use the sleep.', 'But mooooom idonwanna!', 'Whatevs, man.', 'JA!']
return await ctx.send(random.choice(sleep))
@bot.command()
async def shower(ctx):
printCtx(ctx)
cmd_trigger()
shower = [' you reek already!', ' it`s about time...', ' nah, its cool.',
' I mean, have you already showered this week?', ' but only a golden shower.']
return await ctx.send(ctx.message.author.mention + " " + random.choice(shower))
@bot.command()
async def joke(ctx):
printCtx(ctx)
cmd_trigger()
return await ctx.send(embed=discord.Embed(title="Joke", description=random.choice(messages.JOKES), color=0x00ff00))
@bot.command(name='8ball', )
async def cmd_8ball(ctx):
printCtx(ctx)
cmd_trigger()
ball_res = ['It is certain.', 'It is decidedly so.', 'Without a doubt.', 'Yes - definitely.', 'You may rely on it.',
'As I see it, yes.', 'Most likely.', 'Outlook good.', 'Yes.', 'Signs point to yes.',
                'Reply hazy, try again.', 'Ask again later.', 'Better not tell you now.', 'Cannot predict now.',
'Concentrate and ask again.', 'Don`t count on it.', 'My reply is no.', 'My sources say no.',
'Outlook not so good.']
return await ctx.send(embed=discord.Embed(title="8Ball", description=random.choice(ball_res), color=0x00ff00))
@bot.command()
async def roll(ctx, dice_string, mod: int = 0):
printCtx(ctx)
cmd_trigger()
try:
count_raw, num_raw = dice_string.split("d")
if not count_raw:
count_raw = 1
count = int(count_raw)
num = int(num_raw)
await ctx.send("Rolling " + str(count) + " d" + str(num) + " ...")
await asyncio.sleep(2)
random.seed()
numbers = []
        for _ in range(count):  # avoid shadowing the roll count
number = random.randint(1, num)
numbers.append(number)
num_ran_count = (sum(numbers))
if mod == 0:
await ctx.send("I rolled a " + str(num_ran_count) + " for you.")
else:
num_ran_count_mod = num_ran_count + mod
await ctx.send("I rolled " + str(num_ran_count) + " for you. That\'s a " + str(
num_ran_count_mod) + " with your modifier.")
except:
await ctx.send(
f'Error. Something didn\'t work out, <@{ctx.message.author.id}>. Check your formatting. Should it have been `1d{dice_string} {mod}`?')
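# Hedged usage examples: "!roll 2d6 3" rolls two d6 and adds 3 to the total;
# "!roll d20" also works because an empty count defaults to 1.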
@bot.command()
async def bleach(ctx):
printCtx(ctx)
cmd_trigger()
await ctx.send(random.choice(messages.BLEACHES))
@bot.command()
async def goodreads(ctx, *keyword_raw):
printCtx(ctx)
cmd_trigger()
keyword = "+".join(keyword_raw)
async with aiohttp.ClientSession() as session:
html = await fetch(session,'https://www.goodreads.com/search.xml?key=' + goodreads_key + '&q=' + keyword + '&page=1')
xml = ElementTree.fromstring(html)
for i, v in enumerate(xml.find('search/results')):
book = v.find('best_book')
author = book.find('author/name').text
title = book.find('title').text
book_id = book.find('id').text
result_list = (f'**{author}**: {title} - https://www.goodreads.com/book/show/{book_id}.It')
await ctx.send(result_list)
if i == 2:
break
@bot.command(name='reddit', )
async def cmd_reddit(ctx, subreddit_raw):
printCtx(ctx)
subreddit_input = str(subreddit_raw)
cmd_trigger()
    x = 0
    try:
        subreddit = reddit.subreddit(subreddit_input)
        # The NSFW flag belongs to the subreddit, not to a submission, so it
        # is checked once up front instead of on every loop pass
        if subreddit.over18:
            await ctx.send("Please do not request NSFW results.")
            for channel in ctx.message.guild.channels:
                if channel.name == 'logs':
                    auth = (f'<@!{ctx.message.author.id}> ({ctx.message.author})')
                    chan = (f'#{ctx.message.channel}')
                    await channel.send(embed=discord.Embed(title="Requested NSFW content", color=0xeee657)
                    .add_field(name="Channel", value=chan, inline = False)
                    .add_field(name="Message Author", value=auth, inline = False)
                    .add_field(name="Message Author ID", value=ctx.message.author.id, inline = False)
                    .add_field(name="Message ID", value=ctx.message.id, inline = False)
                    .add_field(name="Message", value=ctx.message.content, inline = False)
                    .set_thumbnail(url=ctx.message.author.avatar_url)
                    .set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
                    return await channel.send(ctx.message.guild.roles[-1].mention)
            return
        for submission in subreddit.hot(limit=5):
            # skip NSFW and pinned submissions instead of posting them
            if submission.over_18 or submission.stickied:
                continue
            await ctx.send(f'{submission.url}')
            x += 1
            if x == 3:
                break
    except:
        await ctx.send(
            f'Error. Something didn\'t work out. Search for something else or some time else, <@{ctx.message.author.id}>')
@bot.command(name='wikipedia', )
async def cmd_wikipedia(ctx, *wiki_keyword_raw):
printCtx(ctx)
cmd_trigger()
wiki_error = "Error. Specify/ check/ rephrase your search query,"
try:
wiki_keyword = ' '.join(wiki_keyword_raw)
wiki_keyword_string = wikipedia.page(wiki_keyword, auto_suggest=True, redirect=True)
wiki_sum = wikipedia.summary(wiki_keyword_string, sentences=1, chars=100, auto_suggest=True, redirect=True)
wiki_url = wiki_keyword_string.url
embed_wiki = discord.Embed(title="Wikipedia", description=wiki_keyword, color=0x00ff00)
embed_wiki.add_field(name=wiki_sum, value=wiki_url)
await ctx.send(embed=embed_wiki)
except:
await ctx.send(f'{wiki_error} <@{ctx.message.author.id}>!')
if not wikipedia.search(wiki_keyword, results=3):
return
wiki_choice = ', '.join(wikipedia.search(wiki_keyword, results=3))
await ctx.send(f'Did you mean: {wiki_choice}?')
@bot.command()
async def wiktionary(ctx, *wikti_keyword_list):
printCtx(ctx)
wikti_keyword_raw = " ".join(wikti_keyword_list)
cmd_trigger()
wiki_error = "Error. Specify/ check/ rephrase your search query,"
parser = WiktionaryParser()
parser.set_default_language('english')
try:
def wiktionary__dict(the_fetch):
return ast.literal_eval(str(the_fetch).encode('ascii', 'ignore').decode('ascii'))[0]
word_to_define = wikti_keyword_raw.title()
response = wiktionary__dict(parser.fetch(word_to_define))['definitions'][0]
layout = '**{}** - ({})\n{}\n'.format(word_to_define, response['partOfSpeech'], response['text'])
word_to_define = wikti_keyword_raw.lower()
_response = wiktionary__dict(parser.fetch(word_to_define))['definitions'][0]
layout += '**{}** - ({})\n{}\n'.format(word_to_define, _response['partOfSpeech'], _response['text'])
embed_wikti = discord.Embed(title="Wiktionary", description=layout, color=0x00ff00)
await ctx.send(embed=embed_wikti)
except:
return await ctx.send(f'{wiki_error} <@{ctx.message.author.id}>!')
@bot.command()
async def python(ctx, *keywords_raw):
printCtx(ctx)
keywords_clean = '+'.join(keywords_raw)
url = ("https://docs.python.org/3.6/search.html?q=" + keywords_clean)
return await ctx.send("Here you go: " + url)
@bot.command()
async def roles(ctx):
printCtx(ctx)
cmd_trigger()
await ctx.send(ctx.message.author.mention + "\'s roles are:")
for r in ctx.message.author.roles:
roles_me = r.name
await ctx.send("`" + roles_me + "`")
def total_uptime_save():
time_lapsed = (time.time() - start_time)
total_uptime = time_lapsed + uptime_pull
with open('config.json', 'r') as json_file:
data = json.load(json_file)
data["UPTIME"][0]["uptime"] = total_uptime
with open('config.json', 'w') as json_file:
json.dump(data, json_file)
return
def reaction_trigger_save():
with open('config.json', 'r') as json_file:
data = json.load(json_file)
data["COUNTER"][0]["counter_reac"] = str(reaction_trigger.counter)
with open('config.json', 'w') as outfile:
json.dump(data, outfile)
return
def reaction_trigger():
count = int(reaction_trigger.counter)
count += 1
reaction_trigger.counter = count
if reaction_trigger.counter >= int(reaction_trigger_pull) + 100:
reaction_trigger_save()
return
reaction_trigger.counter = int(reaction_trigger_pull)
def cmd_trigger_save():
with open('config.json', 'r') as json_file:
data = json.load(json_file)
data["COUNTER"][0]["counter_cmd"] = str(cmd_trigger.Counter)
with open('config.json', 'w') as outfile:
json.dump(data, outfile)
return
def cmd_trigger():
count = int(cmd_trigger.Counter)
count += 1
cmd_trigger.Counter = count
if cmd_trigger.Counter >= int(cmd_trigger_pull) + 10:
cmd_trigger_save()
return
cmd_trigger.Counter = int(cmd_trigger_pull)
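# --- Hedged illustration (not original code) ---------------------------------
# The two trigger helpers above keep state as function attributes and flush to
# config.json once the count passes a threshold. Minimal standalone version:
def _demo_counter():
    _demo_counter.value += 1
    if _demo_counter.value % 10 == 0:
        pass  # a real implementation would persist the value here
_demo_counter.value = 0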
def printCtx(ctx):
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {ctx.message.author.name}\n\
Author Name: {ctx.message.author}\n\
Author ID: {ctx.message.author.id}\n\
Command: `{ctx.command.name}`\n\
Full Message: `{ctx.message.content}`\n\
Channel: {ctx.message.channel}\n\
Server: {ctx.message.guild}\n\
Server ID: {ctx.message.guild.id}\n\
------\
')
print(usage_string)
@bot.event
async def on_message(message):
reaction_trigger()
if message.author == bot.user:
return
if message.channel.type != discord.ChannelType.private:
if bot.user.mentioned_in(message) and not message.mention_everyone:
if any(x in message.content for x in messages.USER_GREETINGS):
return await message.channel.send(random.choice(messages.BOT_GREETINGS))
elif any(x in message.content for x in messages.USER_BYES):
return await message.channel.send(random.choice(messages.BOT_BYES))
else:
await message.add_reaction(random.choice(['🤖', '👀', '💾', '🤘']))
if 'USA' in message.content.upper():
await message.add_reaction(random.choice(['🇺🇸', '🍔', '🌭', '🔫']))
if 'NANI' in message.content.upper():
await message.channel.send('NAAAAANNNIIIIII!?!?!?!11')
if not message.channel.nsfw:
offensiveMatch = False
offensiveMatchList = []
            for y in re.sub(r"[^\w]", " ", message.content).split():
for z in messages.OFFENSIVE_LANGUAGE:
if y.lower() == z.lower():
offensiveMatchList.append(y)
offensiveMatch = True
if offensiveMatch:
usage_string = (f'\
[TIME: {utilities.epoch_to_custom_date(utilities.FMT_TIME)}]\n\
Author Alias: {message.author.name}\n\
Author Name: {message.author}\n\
Author ID: {message.author.id}\n\
Event: Offensive Language\n\
Full Message: `{message.content}`\n\
Channel: {message.channel}\n\
Server: {message.guild}\n\
Server ID: {message.guild.id}\n\
------\
')
print(usage_string)
await message.channel.send(f'{message.author.mention}, please do not use this kind of language in non-NSFW marked channels. There are kids here.')
for channel in message.guild.channels:
if channel.name == 'logs':
if len(offensiveMatchList) > 1:
singularOrPlural = "Words:"
else:
singularOrPlural = "Word:"
await channel.send(embed=discord.Embed(title="Offensive Language", color=0xff0000)
.add_field(name="Author Alias:", value=message.author, inline = False)
.add_field(name="Author Name:", value=message.author.name, inline = False)
.add_field(name="Author ID:", value=message.author.id, inline = False)
.add_field(name="Server Name:", value=message.guild.name, inline = False)
.add_field(name="Server ID:", value=message.guild.id, inline = False)
.add_field(name="Channel:", value=message.channel.name, inline = False)
.add_field(name="Offensive " + singularOrPlural, value=(', '.join(offensiveMatchList)), inline = False)
.add_field(name="Original Message:", value=message.content, inline = False)
.add_field(name="State:", value="DELETED", inline = False)
.set_thumbnail(url=message.author.avatar_url)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
await channel.send(message.guild.roles[-1].mention)
break
messageClean = message.content
for matchNr in range(0, len(offensiveMatchList)):
messageClean = messageClean.replace(offensiveMatchList[matchNr], (len(offensiveMatchList[matchNr]) * "*"))
await message.channel.send(embed=discord.Embed(title="Offensive Language", color=0xff0000)
.add_field(name="Author Alias:", value=message.author, inline = False)
.add_field(name="Author Name:", value=message.author.name, inline = False)
.add_field(name="Author ID:", value=message.author.id, inline = False)
.add_field(name="Server Name:", value=message.guild.name, inline = False)
.add_field(name="Server ID:", value=message.guild.id, inline = False)
.add_field(name="Channel:", value=message.channel.name, inline = False)
.add_field(name="Original Message:", value=discord.utils.escape_markdown(messageClean), inline = False)
.add_field(name="State:", value="DELETED", inline = False)
.set_thumbnail(url=message.author.avatar_url)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
await message.author.send(embed=discord.Embed(title="Offensive Language", color=0xff0000)
.add_field(name="Author Alias:", value=message.author, inline = False)
.add_field(name="Author Name:", value=message.author.name, inline = False)
.add_field(name="Author ID:", value=message.author.id, inline = False)
.add_field(name="Server Name:", value=message.guild.name, inline = False)
.add_field(name="Server ID:", value=message.guild.id, inline = False)
.add_field(name="Channel:", value=message.channel.name, inline = False)
.add_field(name="Offensive " + singularOrPlural, value=(', '.join(offensiveMatchList)), inline = False)
.add_field(name="Original Message:", value=message.content, inline = False)
.add_field(name="State:", value="DELETED", inline = False)
.add_field(name="Rules", value="Please rephrase your message", inline = False)
.set_thumbnail(url=message.author.avatar_url)
.set_footer(text=bot.user.name, icon_url=bot.user.avatar_url))
return await message.delete()
for t in messages.TRIGGERS:
if t in message.content.upper() or t in message.content.lower():
for reaction in messages.TRIGGERS[t]:
await message.add_reaction(reaction)
return await bot.process_commands(message)
else:
return await message.channel.send("I can't respond here. Beep boop beep.")
bot.run(TOKEN)
| 41.588045
| 238
| 0.608865
|
d5d8f272ef58601b345ddba4cf9e31c6344f26b9
| 3,656
|
py
|
Python
|
petStockWeb/petStockWeb/middlewares.py
|
laboratoryyingong/web-scraper-pet
|
900154c20cb0c1515843023528b28e37f6fb0780
|
[
"MIT"
] | null | null | null |
petStockWeb/petStockWeb/middlewares.py
|
laboratoryyingong/web-scraper-pet
|
900154c20cb0c1515843023528b28e37f6fb0780
|
[
"MIT"
] | null | null | null |
petStockWeb/petStockWeb/middlewares.py
|
laboratoryyingong/web-scraper-pet
|
900154c20cb0c1515843023528b28e37f6fb0780
|
[
"MIT"
] | null | null | null |
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class PetstockwebSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class PetstockwebDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
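# A minimal sketch of how these classes would be enabled in the project's
# settings.py (the 543 priorities mirror Scrapy's generated template; treat the
# exact numbers as an assumption to adjust per project):
#
# SPIDER_MIDDLEWARES = {
#     'petStockWeb.middlewares.PetstockwebSpiderMiddleware': 543,
# }
# DOWNLOADER_MIDDLEWARES = {
#     'petStockWeb.middlewares.PetstockwebDownloaderMiddleware': 543,
# }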
| 35.153846
| 78
| 0.675055
|
aed3ffd3937dde28c04bc088307d39d65d105361
| 302
|
py
|
Python
|
tools/testing/kunit/qemu_configs/i386.py
|
jainsakshi2395/linux
|
7ccb860232bb83fb60cd6bcf5aaf0c008d903acb
|
[
"Linux-OpenIB"
] | 44
|
2022-03-16T08:32:31.000Z
|
2022-03-31T16:02:35.000Z
|
tools/testing/kunit/qemu_configs/i386.py
|
jainsakshi2395/linux
|
7ccb860232bb83fb60cd6bcf5aaf0c008d903acb
|
[
"Linux-OpenIB"
] | 1
|
2021-01-27T01:29:47.000Z
|
2021-01-27T01:29:47.000Z
|
tools/testing/kunit/qemu_configs/i386.py
|
jainsakshi2395/linux
|
7ccb860232bb83fb60cd6bcf5aaf0c008d903acb
|
[
"Linux-OpenIB"
] | 18
|
2022-03-19T04:41:04.000Z
|
2022-03-31T03:32:12.000Z
|
from ..qemu_config import QemuArchParams
QEMU_ARCH = QemuArchParams(linux_arch='i386',
kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='x86_64',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
extra_qemu_params=[''])
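# Typical usage (an assumption based on kunit's CLI; run from the kernel tree
# root, where the --arch flag selects this qemu config by file name):
#   ./tools/testing/kunit/kunit.py run --arch=i386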
| 27.454545
| 45
| 0.718543
|
092fdc160284ffd9050feda359aca0bf9effde9d
| 2,705
|
py
|
Python
|
Python/partition-array-for-maximum-sum.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/partition-array-for-maximum-sum.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/partition-array-for-maximum-sum.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(n * k)
# Space: O(k)
# 1043
# Given an integer array A, you partition the array into (contiguous) subarrays of length at most K.
# After partitioning, each subarray has their values changed to become the maximum value of that subarray.
#
# Return the largest sum of the given array after partitioning.
# Solution:
# dp[i] record the maximum sum we can get considering A[0] ~ A[i]
# To get dp[i], we will try to change k last numbers separately to the maximum of them,
# for k = 1..K.
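# Worked micro-example (illustrative values): A = [1, 15, 7], K = 2
#   dp[0] = 1                                    -> best for [1]
#   dp[1] = max(dp[0] + 1*15, 2*15) = 30         -> group [1,15] together
#   dp[2] = max(dp[1] + 1*7, dp[0] + 2*15) = 37  -> [1,15] | [7]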
class Solution(object):
def maxSumAfterPartitioning(self, A, K): # USE THIS space O(n)
"""
:type A: List[int]
:type K: int
:rtype: int
"""
N = len(A)
dp = [0]*N
for i in range(N):
mx = float('-inf')
for l in range(1, K+1):
if i+1 >= l:
mx = max(mx, A[i-l+1])
dp[i] = max(dp[i], (dp[i-l] if i>=l else 0) + l*mx)
return dp[N-1]
# best space complexity, reuse rolling spaces.
def maxSumAfterPartitioning_kamyu(self, A, K):
W = K+1
dp = [0]*W
for i in range(len(A)):
curr_max = 0
for k in range(1, min(K, i+1) + 1):
curr_max = max(curr_max, A[i-k+1])
dp[i % W] = max(dp[i % W], (dp[(i-k) % W] if i >= k else 0) + curr_max*k)
return dp[(len(A)-1) % W]
# negative indices are circular from the end of dp array, not easy to understand
def maxSumAfterPartitioning_lee215(self, A, K):
N = len(A)
dp = [0] * (N + K)
for i in range(N):
curMax = 0
for k in range(1, min(K, i + 1) + 1):
curMax = max(curMax, A[i - k + 1])
dp[i] = max(dp[i], dp[i - k] + curMax * k)
return dp[N - 1]
# TLE, DP: start with shorter sequence; result for longer sequence can always be obtained from 2 sub-sequence
# O(n^3)
def maxSumAfterPartitioning_ming(self, A: List[int], K: int) -> int:
N = len(A)
dp = [[0] * N for _ in range(N)]
mx = [[0] * N for _ in range(N)]
for i in range(N):
mx[i][i] = A[i]
for j in range(i + 1, N):
mx[i][j] = max(mx[i][j - 1], A[j])
for l in range(1, N + 1):
for i in range(N - l + 1):
j = i + l - 1
if l <= K:
dp[i][j] = l * mx[i][j]
else:
for k in range(i, j):
dp[i][j] = max(dp[i][j], dp[i][k] + dp[k + 1][j])
return dp[0][-1]
print(Solution().maxSumAfterPartitioning([1,15,7,9,2,5,10], 3)) # 84, A becomes [15,15,15,9,10,10,10]
| 35.12987
| 113
| 0.490573
|
adb7cd0fbc462565ef1111b537607e93cbe9f138
| 339
|
py
|
Python
|
musix/util/files.py
|
andrijaJ01/musix-rewrite
|
1a47084410347ee18a32a645e4e158d08cf7db6f
|
[
"MIT"
] | null | null | null |
musix/util/files.py
|
andrijaJ01/musix-rewrite
|
1a47084410347ee18a32a645e4e158d08cf7db6f
|
[
"MIT"
] | null | null | null |
musix/util/files.py
|
andrijaJ01/musix-rewrite
|
1a47084410347ee18a32a645e4e158d08cf7db6f
|
[
"MIT"
] | null | null | null |
import os
import toml
config={}
def __ensure__(path):
if not os.path.isdir(path):
os.makedirs(path,exist_ok=True)
def read_conf(file=None):
    global config
    if file:
        # An explicit path takes precedence over the global config file.
        # (Previously the function printed this message but still loaded
        # the global file regardless of the argument.)
        print(f"reading from config: {file}")
        config = toml.load(file)
    else:
        print("reading from global config file")
        config = toml.load("configs/config.toml")
    return config
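# Example usage (a minimal sketch; assumes the TOML files exist at these paths):
#   settings = read_conf()                    # loads configs/config.toml
#   overrides = read_conf("configs/dev.toml")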
| 22.6
| 45
| 0.669617
|
deb3700278c2a80584f00b0b8e47706655e5ef2b
| 483
|
py
|
Python
|
readthedocs/builds/migrations/0006_add_config_field.py
|
mforbes/readthedocs.org
|
92f6224a67648a6d27e7a295973c2718d07cee11
|
[
"MIT"
] | 4,054
|
2015-01-01T00:58:07.000Z
|
2019-06-28T05:50:49.000Z
|
readthedocs/builds/migrations/0006_add_config_field.py
|
mforbes/readthedocs.org
|
92f6224a67648a6d27e7a295973c2718d07cee11
|
[
"MIT"
] | 4,282
|
2015-01-01T21:38:49.000Z
|
2019-06-28T15:41:00.000Z
|
readthedocs/builds/migrations/0006_add_config_field.py
|
mforbes/readthedocs.org
|
92f6224a67648a6d27e7a295973c2718d07cee11
|
[
"MIT"
] | 3,224
|
2015-01-01T07:38:45.000Z
|
2019-06-28T09:19:10.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-02 13:24
import jsonfield.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('builds', '0005_remove-version-alias'),
]
operations = [
migrations.AddField(
model_name='build',
name='_config',
field=jsonfield.fields.JSONField(default=dict, verbose_name='Configuration used in the build'),
),
]
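# Applied like any other migration, e.g. with the standard management command:
#   python manage.py migrate builds 0006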
| 24.15
| 107
| 0.625259
|
562804897941695eccd6e89331997905945256c1
| 3,328
|
py
|
Python
|
managesf/tests/test_utils.py
|
enovance/managesf
|
5f6bc6857ebbffb929a063ccc3ab94317fa3784a
|
[
"Apache-2.0"
] | null | null | null |
managesf/tests/test_utils.py
|
enovance/managesf
|
5f6bc6857ebbffb929a063ccc3ab94317fa3784a
|
[
"Apache-2.0"
] | null | null | null |
managesf/tests/test_utils.py
|
enovance/managesf
|
5f6bc6857ebbffb929a063ccc3ab94317fa3784a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import TestCase
from mock import patch
from managesf.controllers import utils
from managesf.tests import dummy_conf
import json
class FakeResponse():
def __init__(self, code, content=None, text=None, cookies=None):
self.status_code = code
self.content = content
self.text = text
self.cookies = cookies
def json(self):
return json.loads(self.content)
class TestUtils(TestCase):
@classmethod
    def setUpClass(cls):
cls.conf = dummy_conf()
utils.conf = cls.conf
class TestRemoteUser(TestCase):
@classmethod
    def setUpClass(cls):
cls.conf = dummy_conf()
utils.conf = cls.conf
cls.ru = utils.RemoteUser('john', 'dummy_host',
sshkey_path='dummy_key')
def test_init(self):
opt = ['-o', 'LogLevel=ERROR', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null']
ru = utils.RemoteUser('john', 'dummy_host')
self.assertEqual(opt, ru.opt)
self.assertEqual('john@dummy_host', ru.host)
opt = opt + ['-i', 'dummy_key']
ru = utils.RemoteUser('john', 'dummy_host', sshkey_path='dummy_key')
self.assertEqual(opt, ru.opt)
def test_exe(self):
        # Test fails because the Popen mock's communicate() doesn't return a tuple
        # Temporarily disabled
return True
with patch('managesf.controllers.utils.Popen') as Popen_mock:
p = Popen_mock.return_value
self.ru._exe('pwd')
Popen_mock.assert_called_once_with('pwd', stdout=-1, stderr=-1)
p.wait.assert_any_call()
def test_ssh(self):
with patch('managesf.controllers.utils.RemoteUser._exe') as exe_mock:
cmd = ['ssh'] + self.ru.opt + [self.ru.host] + ['pwd']
self.ru._ssh('pwd')
exe_mock.assert_called_once_with(cmd)
def test__scpFromRemote(self):
with patch('managesf.controllers.utils.RemoteUser._exe') as exe_mock:
src = 'dummy_host1'
dest = 'dummy_host2'
src = '%s:%s' % (self.ru.host, src)
cmd = ['scp'] + self.ru.opt + [src, dest]
self.ru._scpFromRemote('dummy_host1', 'dummy_host2')
exe_mock.assert_called_once_with(cmd)
def test__scpToRemote(self):
with patch('managesf.controllers.utils.RemoteUser._exe') as exe_mock:
src = 'dummy_host1'
dest = 'dummy_host2'
dest = '%s:%s' % (self.ru.host, dest)
cmd = ['scp'] + self.ru.opt + [src, dest]
self.ru._scpToRemote('dummy_host1', 'dummy_host2')
exe_mock.assert_called_once_with(cmd)
| 35.404255
| 77
| 0.632813
|
36bc59f2a66417c68b0f9513f639f0a7dbefab01
| 168
|
py
|
Python
|
Pacotes/ex049.py
|
TonyRio/Python-Exercicios
|
8a72d1b12418c6485794dae184425df0daf098bb
|
[
"MIT"
] | null | null | null |
Pacotes/ex049.py
|
TonyRio/Python-Exercicios
|
8a72d1b12418c6485794dae184425df0daf098bb
|
[
"MIT"
] | null | null | null |
Pacotes/ex049.py
|
TonyRio/Python-Exercicios
|
8a72d1b12418c6485794dae184425df0daf098bb
|
[
"MIT"
] | null | null | null |
n = int(input('Which multiplication table do you want to print? '))
print('**' * 10)
for c in range(1, 11):
    print(' {:^} * {:^} = {:^} *'.format(c, n, c * n))
print('**' * 10)
| 33.6
| 60
| 0.47619
|
37334039a1dee7971313e5d7cc87d901c5000320
| 12,000
|
py
|
Python
|
dymos/examples/brachistochrone/test/test_brachistochrone_solve_segments.py
|
pgkirsch/dymos
|
d4e68bd3db13972dcbf9462c37c55814f521d762
|
[
"Apache-2.0"
] | 1
|
2021-07-19T17:03:49.000Z
|
2021-07-19T17:03:49.000Z
|
dymos/examples/brachistochrone/test/test_brachistochrone_solve_segments.py
|
RutvikM3/dymos
|
b445fff6f27d910007dad48c844c9bc538804122
|
[
"Apache-2.0"
] | null | null | null |
dymos/examples/brachistochrone/test/test_brachistochrone_solve_segments.py
|
RutvikM3/dymos
|
b445fff6f27d910007dad48c844c9bc538804122
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
import openmdao.api as om
import dymos as dm
import dymos.examples.brachistochrone.test.ex_brachistochrone_vector_states as ex_brachistochrone_vs
import dymos.examples.brachistochrone.test.ex_brachistochrone as ex_brachistochrone
from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.utils.general_utils import set_pyoptsparse_opt
OPT, OPTIMIZER = set_pyoptsparse_opt('SLSQP')
def _make_problem(transcription='gauss-lobatto', num_segments=8, transcription_order=3,
compressed=True, optimizer='SLSQP', run_driver=True, force_alloc_complex=False,
solve_segments=False):
p = om.Problem(model=om.Group())
p.driver = om.pyOptSparseDriver()
p.driver.options['optimizer'] = optimizer
if optimizer == 'SNOPT':
p.driver.opt_settings['iSumm'] = 6
p.driver.opt_settings['Verify level'] = 3
elif optimizer == 'IPOPT':
p.driver.opt_settings['print_level'] = 4
p.driver.declare_coloring(tol=1.0E-12)
if transcription == 'gauss-lobatto':
t = dm.GaussLobatto(num_segments=num_segments,
order=transcription_order,
compressed=compressed)
elif transcription == 'radau-ps':
t = dm.Radau(num_segments=num_segments,
order=transcription_order,
compressed=compressed)
traj = dm.Trajectory()
phase = dm.Phase(ode_class=BrachistochroneODE, transcription=t)
p.model.add_subsystem('traj0', traj)
traj.add_phase('phase0', phase)
phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
phase.add_state('x', fix_initial=False, fix_final=False, solve_segments=solve_segments)
phase.add_state('y', fix_initial=False, fix_final=False, solve_segments=solve_segments)
# Note that by omitting the targets here Dymos will automatically attempt to connect
# to a top-level input named 'v' in the ODE, and connect to nothing if it's not found.
phase.add_state('v', fix_initial=False, fix_final=False, solve_segments=solve_segments)
phase.add_control('theta',
continuity=True, rate_continuity=True,
units='deg', lower=0.01, upper=179.9)
phase.add_parameter('g', targets=['g'], units='m/s**2')
phase.add_boundary_constraint('x', loc='initial', equals=0)
phase.add_boundary_constraint('y', loc='initial', equals=10)
phase.add_boundary_constraint('v', loc='initial', equals=0)
phase.add_boundary_constraint('x', loc='final', equals=10)
phase.add_boundary_constraint('y', loc='final', equals=5)
# Minimize time at the end of the phase
phase.add_objective('time_phase', loc='final', scaler=10)
p.setup(check=['unconnected_inputs'], force_alloc_complex=force_alloc_complex)
p['traj0.phase0.t_initial'] = 0.0
p['traj0.phase0.t_duration'] = 2.0
p['traj0.phase0.states:x'] = phase.interp('x', [0, 10])
p['traj0.phase0.states:y'] = phase.interp('y', [10, 5])
p['traj0.phase0.states:v'] = phase.interp('v', [0, 9.9])
p['traj0.phase0.controls:theta'] = phase.interp('theta', [5, 100])
p['traj0.phase0.parameters:g'] = 9.80665
# dm.run_problem(p, run_driver=run_driver, simulate=True, make_plots=False)
return p
@use_tempdirs
class TestBrachistochroneVectorStatesExampleSolveSegments(unittest.TestCase):
def assert_results(self, p):
t_initial = p.get_val('traj0.phase0.time')[0]
t_final = p.get_val('traj0.phase0.time')[-1]
x0 = p.get_val('traj0.phase0.timeseries.states:pos')[0, 0]
xf = p.get_val('traj0.phase0.timeseries.states:pos')[0, -1]
y0 = p.get_val('traj0.phase0.timeseries.states:pos')[0, 1]
yf = p.get_val('traj0.phase0.timeseries.states:pos')[-1, 1]
v0 = p.get_val('traj0.phase0.timeseries.states:v')[0, 0]
vf = p.get_val('traj0.phase0.timeseries.states:v')[-1, 0]
g = p.get_val('traj0.phase0.timeseries.parameters:g')
thetaf = p.get_val('traj0.phase0.timeseries.controls:theta')[-1, 0]
assert_almost_equal(t_initial, 0.0)
assert_almost_equal(x0, 0.0)
assert_almost_equal(y0, 10.0)
assert_almost_equal(v0, 0.0)
assert_almost_equal(t_final, 1.8016, decimal=4)
assert_almost_equal(xf, 10.0, decimal=3)
assert_almost_equal(yf, 5.0, decimal=3)
assert_almost_equal(vf, 9.902, decimal=3)
assert_almost_equal(g, 9.80665, decimal=3)
assert_almost_equal(thetaf, 100.12, decimal=0)
def test_ex_brachistochrone_vs_radau_compressed(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',
compressed=True,
force_alloc_complex=True,
solve_segments='forward',
num_segments=10,
transcription_order=3)
self.assert_results(p)
def test_ex_brachistochrone_vs_gl_compressed(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',
compressed=True,
force_alloc_complex=True,
solve_segments='forward',
num_segments=10,
transcription_order=3)
self.assert_results(p)
def test_ex_brachistochrone_vs_radau_uncompressed(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',
compressed=False,
force_alloc_complex=True,
solve_segments='forward',
num_segments=10,
transcription_order=3)
self.assert_results(p)
def test_ex_brachistochrone_vs_gl_uncompressed(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',
compressed=False,
force_alloc_complex=True,
solve_segments='forward',
num_segments=10,
transcription_order=3)
self.assert_results(p)
def test_ex_brachistochrone_vs_radau_single_segment(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',
compressed=True,
force_alloc_complex=True,
solve_segments='forward',
num_segments=1,
transcription_order=11)
self.assert_results(p)
def test_ex_brachistochrone_vs_gl_single_segment(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',
compressed=True,
force_alloc_complex=True,
solve_segments='forward',
num_segments=1,
transcription_order=11)
self.assert_results(p)
    def test_ex_brachistochrone_vs_radau_single_segment_uncompressed(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',
compressed=False,
force_alloc_complex=True,
solve_segments='forward',
num_segments=1,
transcription_order=11)
self.assert_results(p)
    def test_ex_brachistochrone_vs_gl_single_segment_uncompressed(self):
ex_brachistochrone_vs.SHOW_PLOTS = False
p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',
compressed=False,
force_alloc_complex=True,
solve_segments='forward',
num_segments=1,
transcription_order=11)
self.assert_results(p)
@use_tempdirs
class TestBrachistochroneSolveSegments(unittest.TestCase):
def assert_results(self, p):
t_initial = p.get_val('traj0.phase0.time')[0]
t_final = p.get_val('traj0.phase0.time')[-1]
x0 = p.get_val('traj0.phase0.timeseries.states:x')[0]
xf = p.get_val('traj0.phase0.timeseries.states:x')[-1]
y0 = p.get_val('traj0.phase0.timeseries.states:y')[0]
yf = p.get_val('traj0.phase0.timeseries.states:y')[-1]
v0 = p.get_val('traj0.phase0.timeseries.states:v')[0]
vf = p.get_val('traj0.phase0.timeseries.states:v')[-1]
g = p.get_val('traj0.phase0.timeseries.parameters:g')
thetaf = p.get_val('traj0.phase0.timeseries.controls:theta')[-1, 0]
assert_almost_equal(t_initial, 0.0)
assert_almost_equal(x0, 0.0, decimal=4)
assert_almost_equal(y0, 10.0, decimal=4)
assert_almost_equal(v0, 0.0, decimal=4)
assert_almost_equal(t_final, 1.8016, decimal=4)
assert_almost_equal(xf, 10.0, decimal=3)
assert_almost_equal(yf, 5.0, decimal=3)
assert_almost_equal(vf, 9.902, decimal=3)
assert_almost_equal(g, 9.80665, decimal=3)
assert_almost_equal(thetaf, 100.12, decimal=0)
def test_brachistochrone_solve_segments(self):
for tx in ('radau-ps', 'gauss-lobatto'):
for solve_segs in (False, 'forward', 'backward', None):
for compressed in (True, False):
print(f'transcription: {tx} solve_segments: {solve_segs} compressed: {compressed}')
with self.subTest(f'transcription: {tx} solve_segments: {solve_segs} '
f'compressed: {compressed}'):
p = _make_problem(transcription=tx,
compressed=compressed,
optimizer='SLSQP',
force_alloc_complex=True,
solve_segments=solve_segs,
num_segments=20,
transcription_order=3)
dm.run_problem(p)
self.assert_results(p)
| 48.192771
| 105
| 0.549833
|
b5ce3e78c49c2f00c13d8ad3a62850030efe81cd
| 12,051
|
py
|
Python
|
django/db/migrations/state.py
|
Bashar/django
|
e520a73eeea6b185b719901ab9985ecef00e5664
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/migrations/state.py
|
Bashar/django
|
e520a73eeea6b185b719901ab9985ecef00e5664
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/migrations/state.py
|
Bashar/django
|
e520a73eeea6b185b719901ab9985ecef00e5664
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.db import models
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.fields.related import do_pending_lookups
from django.conf import settings
from django.utils import six
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
class InvalidBasesError(ValueError):
pass
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
self.apps = None
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
def add_model_state(self, model_state):
self.models[(model_state.app_label, model_state.name.lower())] = model_state
def clone(self):
"Returns an exact copy of this ProjectState"
return ProjectState(
models=dict((k, v.clone()) for k, v in self.models.items()),
real_apps=self.real_apps,
)
def render(self, include_real=None, ignore_swappable=False):
"Turns the project state into actual models in a new Apps"
if self.apps is None:
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
real_models = []
for app_label in self.real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
real_models.append(ModelState.from_model(model))
# Populate the app registry with a stub for each application.
app_labels = set(model_state.app_label for model_state in self.models.values())
self.apps = Apps([AppConfigStub(label) for label in sorted(self.real_apps + list(app_labels))])
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
unrendered_models = list(self.models.values()) + real_models
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self.apps)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError("Cannot resolve bases for %r" % new_unrendered_models)
unrendered_models = new_unrendered_models
# make sure apps has no dangling references
if self.apps._pending_lookups:
# There's some lookups left. See if we can first resolve them
# ourselves - sometimes fields are added after class_prepared is sent
for lookup_model, operations in self.apps._pending_lookups.items():
try:
model = self.apps.get_model(lookup_model[0], lookup_model[1])
except LookupError:
if "%s.%s" % (lookup_model[0], lookup_model[1]) == settings.AUTH_USER_MODEL and ignore_swappable:
continue
# Raise an error with a best-effort helpful message
# (only for the first issue). Error message should look like:
# "ValueError: Lookup failed for model referenced by
# field migrations.Book.author: migrations.Author"
raise ValueError("Lookup failed for model referenced by field {field}: {model[0]}.{model[1]}".format(
field=operations[0][1],
model=lookup_model,
))
else:
do_pending_lookups(model)
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models():
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name.lower())] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
if set(self.real_apps) != set(other.real_apps):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
super(AppConfigStub, self).__init__(label, None)
def import_models(self, all_models):
self.models = all_models
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None):
self.app_label = app_label
self.name = force_text(name)
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model, )
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
@classmethod
def from_model(cls, model):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
name, path, args, kwargs = field.deconstruct()
field_class = import_string(path)
try:
fields.append((name, field_class(*args, **kwargs)))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
name,
model._meta.app_label,
model._meta.object_name,
e,
))
for field in model._meta.local_many_to_many:
name, path, args, kwargs = field.deconstruct()
field_class = import_string(path)
try:
fields.append((name, field_class(*args, **kwargs)))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
else:
options[name] = model._meta.original_attrs[name]
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
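        # Illustration (hypothetical models): for Child(AbstractA, Mixin) where
        # AbstractA is an abstract model extending models.Model, flatten_bases()
        # replaces AbstractA with its own non-abstract ancestry, and the MRO sort
        # below keeps the result deterministic and duplicate-free.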
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
"%s.%s" % (base._meta.app_label, base._meta.model_name)
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
)
def clone(self):
"Returns an exact copy of this ModelState"
# We deep-clone the fields using deconstruction
fields = []
for name, field in self.fields:
_, path, args, kwargs = field.deconstruct()
field_class = import_string(path)
fields.append((name, field_class(*args, **kwargs)))
# Now make a copy
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=fields,
options=dict(self.options),
bases=self.bases,
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
if "unique_together" in meta_contents:
meta_contents["unique_together"] = list(meta_contents["unique_together"])
meta = type(str("Meta"), tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = dict(self.fields)
body['Meta'] = meta
body['__module__'] = "__fake__"
# Then, make a Model object
return type(
str(self.name),
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:])) for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases)
)
def __ne__(self, other):
return not (self == other)
| 41.412371
| 139
| 0.585512
|
ce165cec65793a13989381e46c064de48741b06c
| 8,355
|
py
|
Python
|
Section_05/5.02/view.py
|
PacktPublishing/Tkinter-GUI-Application-Development-Projects
|
58b49e23e887bf45810666cb1c63d1e06291873b
|
[
"MIT"
] | 11
|
2019-05-24T17:03:55.000Z
|
2021-11-24T23:59:38.000Z
|
Section_05/5.02/view.py
|
PacktPublishing/Tkinter-GUI-Application-Development-Projects
|
58b49e23e887bf45810666cb1c63d1e06291873b
|
[
"MIT"
] | null | null | null |
Section_05/5.02/view.py
|
PacktPublishing/Tkinter-GUI-Application-Development-Projects
|
58b49e23e887bf45810666cb1c63d1e06291873b
|
[
"MIT"
] | 9
|
2019-06-19T02:20:15.000Z
|
2022-03-25T01:36:14.000Z
|
import tkinter as tk
import tkinter.filedialog
import tkinter.messagebox
import tkinter.ttk
import model
import player
AUDIO_PLAYER_NAME = "Achtung Baby"
class View:
loop_choices = [("No Loop", 1), ("Loop Current", 2), ("Loop All", 3)]
def __init__(self, root, model, player):
self.root = root
self.model = model
self.player = player
self.create_gui()
def create_gui(self):
self.root.title(AUDIO_PLAYER_NAME)
self.create_top_display()
self.create_button_frame()
self.create_list_box()
self.create_bottom_frame()
self.create_context_menu()
def create_top_display(self):
frame = tk.Frame(self.root)
glass_frame_image = tk.PhotoImage(file='../icons/glass_frame.gif')
self.canvas = tk.Canvas(frame, width=370, height=90)
self.canvas.image = glass_frame_image
self.canvas.grid(row=1)
self.console = self.canvas.create_image(
0, 10, anchor=tk.NW, image=glass_frame_image)
self.clock = self.canvas.create_text(125, 68, anchor=tk.W, fill='#CBE4F6',
text="00:00")
self.track_length_text = self.canvas.create_text(167, 68, anchor=tk.W, fill='#CBE4F6',
text="of 00:00")
self.track_name = self.canvas.create_text(50, 35, anchor=tk.W, fill='#9CEDAC',
text='\"Currently playing: none \"')
frame.grid(row=1, pady=1, padx=0)
def create_button_frame(self):
frame = tk.Frame(self.root)
previous_track_icon = tk.PhotoImage(file='../icons/previous_track.gif')
previous_track_button = tk.Button(
frame, image=previous_track_icon, borderwidth=0, padx=0, command=self.on_previous_track_button_clicked)
previous_track_button.image = previous_track_icon
previous_track_button.grid(row=3, column=1, sticky='w')
rewind_icon = tk.PhotoImage(file='../icons/rewind.gif')
rewind_button = tk.Button(
frame, image=rewind_icon, borderwidth=0, padx=0, command=self.on_rewind_button_clicked)
rewind_button.image = rewind_icon
rewind_button.grid(row=3, column=2, sticky='w')
self.play_icon = tk.PhotoImage(file='../icons/play.gif')
self.stop_icon = tk.PhotoImage(file='../icons/stop.gif')
self.play_stop_button = tk.Button(
frame, image=self.play_icon, borderwidth=0, padx=0, command=self.on_play_stop_button_clicked)
self.play_stop_button.image = self.play_icon
self.play_stop_button.grid(row=3, column=3)
pause_icon = tk.PhotoImage(file='../icons/pause.gif')
pause_unpause_button = tk.Button(
frame, image=pause_icon, borderwidth=0, padx=0, command=self.on_pause_unpause_button_clicked)
pause_unpause_button.image = pause_icon
pause_unpause_button.grid(row=3, column=4)
fast_forward_icon = tk.PhotoImage(file='../icons/fast_forward.gif')
fast_forward_button = tk.Button(
frame, image=fast_forward_icon, borderwidth=0, padx=0, command=self.on_fast_forward_button_clicked)
fast_forward_button.image = fast_forward_icon
fast_forward_button.grid(row=3, column=5)
next_track_icon = tk.PhotoImage(file='../icons/next_track.gif')
next_track_button = tk.Button(
frame, image=next_track_icon, borderwidth=0, padx=0, command=self.on_next_track_button_clicked)
next_track_button.image = next_track_icon
next_track_button.grid(row=3, column=6)
self.mute_icon = tk.PhotoImage(file='../icons/mute.gif')
self.unmute_icon = tk.PhotoImage(file='../icons/unmute.gif')
self.mute_unmute_button = tk.Button(
frame, image=self.unmute_icon, text='unmute', borderwidth=0, padx=0, command=self.on_mute_unmute_button_clicked)
self.mute_unmute_button.image = self.unmute_icon
self.mute_unmute_button.grid(row=3, column=7)
self.volume_scale = tkinter.ttk.Scale(
frame, from_=0.0, to=1.0, command=self.on_volume_scale_changed)
self.volume_scale.set(0.6)
self.volume_scale.grid(row=3, column=8, padx=5)
frame.grid(row=3, columnspan=5, sticky='w', pady=4, padx=5)
def create_list_box(self):
frame = tk.Frame(self.root)
self.list_box = tk.Listbox(frame, activestyle='none', cursor='hand2',
bg='#1C3D7D', fg='#A0B9E9', selectmode=tk.EXTENDED, height=10)
self.list_box.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.list_box.bind(
"<Double-Button-1>", self.on_play_list_double_clicked)
self.list_box.bind("<Button-3>", self.show_context_menu)
scroll_bar = tk.Scrollbar(frame)
scroll_bar.pack(side=tk.RIGHT, fill=tk.BOTH)
self.list_box.config(yscrollcommand=scroll_bar.set)
scroll_bar.config(command=self.list_box.yview)
frame.grid(row=4, padx=5, columnspan=10, sticky='ew')
def create_bottom_frame(self):
frame = tk.Frame(self.root)
add_file_icon = tk.PhotoImage(file='../icons/add_file.gif')
add_file_button = tk.Button(frame, image=add_file_icon, borderwidth=0,
padx=0, text='Add File', command=self.on_add_file_button_clicked)
add_file_button.image = add_file_icon
add_file_button.grid(row=5, column=1)
remove_selected_icon = tk.PhotoImage(
file='../icons/delete_selected.gif')
remove_selected_button = tk.Button(
frame, image=remove_selected_icon, borderwidth=0, padx=0, text='Delete', command=self.on_remove_selected_button_clicked)
remove_selected_button.image = remove_selected_icon
remove_selected_button.grid(row=5, column=2)
add_directory_icon = tk.PhotoImage(file='../icons/add_directory.gif')
add_directory_button = tk.Button(frame, image=add_directory_icon, borderwidth=0,
padx=0, text='Add Dir', command=self.on_add_directory_button_clicked)
add_directory_button.image = add_directory_icon
add_directory_button.grid(row=5, column=3)
empty_play_list_icon = tk.PhotoImage(
file='../icons/clear_play_list.gif')
empty_play_list_button = tk.Button(frame, image=empty_play_list_icon, borderwidth=0,
padx=0, text='Clear All', command=self.on_clear_play_list_button_clicked)
empty_play_list_button.image = empty_play_list_icon
empty_play_list_button.grid(row=5, column=4)
self.loop_value = tk.IntVar()
self.loop_value.set(3)
for txt, val in self.loop_choices:
tk.Radiobutton(frame, text=txt, variable=self.loop_value, value=val).grid(
row=5, column=4 + val, pady=3)
frame.grid(row=5, sticky='w', padx=5)
def create_context_menu(self):
self.context_menu = tk.Menu(self.list_box, tearoff=0)
self.context_menu.add_command(
label="Delete", command=self.on_remove_selected_context_menu_clicked)
def show_context_menu(self, event):
self.context_menu.tk_popup(event.x_root, event.y_root)
def on_previous_track_button_clicked(self):
pass
def on_rewind_button_clicked(self):
pass
def on_play_stop_button_clicked(self):
pass
def on_pause_unpause_button_clicked(self):
pass
def on_mute_unmute_button_clicked(self):
pass
def on_fast_forward_button_clicked(self):
pass
def on_next_track_button_clicked(self):
pass
def on_volume_scale_changed(self, value):
pass
def on_add_file_button_clicked(self):
pass
def on_remove_selected_button_clicked(self):
pass
def on_add_directory_button_clicked(self):
pass
def on_clear_play_list_button_clicked(self):
pass
def on_remove_selected_context_menu_clicked(self):
pass
def on_play_list_double_clicked(self, event=None):
pass
if __name__ == '__main__':
root = tk.Tk()
root.resizable(width=False, height=False)
model = model.Model()
player = player.Player()
app = View(root, model, player)
root.mainloop()
| 39.976077
| 132
| 0.656373
|
93892c714176e39e8d28d18ba761064ec6f7b5cb
| 3,645
|
py
|
Python
|
line_protocol.py
|
erelsgl/family-fair-allocation
|
abd66e224989a075a822bf83d7d663c177d3bc0a
|
[
"MIT"
] | 1
|
2021-11-07T20:56:21.000Z
|
2021-11-07T20:56:21.000Z
|
line_protocol.py
|
erelsgl/family-fair-allocation
|
abd66e224989a075a822bf83d7d663c177d3bc0a
|
[
"MIT"
] | null | null | null |
line_protocol.py
|
erelsgl/family-fair-allocation
|
abd66e224989a075a822bf83d7d663c177d3bc0a
|
[
"MIT"
] | null | null | null |
#!python3
"""
The line-allocation protocol:
* 1/2-democratic EF1 for two families with general monotone agents;
* 1/k-democratic fair for k families with additive agents.
See: https://arxiv.org/abs/1709.02564 Theorems 4.2 and 5.8.
"""
from agents import *
from families import Family
import fairness_criteria
import logging, sys
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
# To enable tracing, logger.setLevel(logging.INFO)
def allocate(families:list, goods:list)->list:
"""
    Order the goods on a line and allocate them in a 1/k-democratic fair way among k families,
based on the fairness-criterion of each family.
:return a list of bundles - a bundle per family.
NOTE: The algorithm is guaranteed to finish with an allocation in the following cases:
Case A: there are two families, and the fairness criterion is EF1 or weaker
(1/2-fraction-MMS, 1-of-3-MMS, MMS if the agents are binary, or PROP1?).
         This is proved in Theorem 4.2 and Corollary 4.6.
    Case B: there are k families, and the fairness criterion is one of
(1/k-fraction-MMS, 1-of-(2k-1)-MMS, MMS if the agents are binary, or PROP[k-1]).
This is proved in Theorem 5.8.
In other cases, the algorithm behavior is undefined.
>>> fairness_PROP1 = fairness_criteria.ProportionalExceptC(num_of_agents=2,c=1)
>>> family1 = Family([BinaryAgent({"w","x"},1),BinaryAgent({"x","y"},2),BinaryAgent({"y","z"},3), BinaryAgent({"z","w"},4)], fairness_criterion=fairness_PROP1, name="Family 1")
>>> family2 = Family([BinaryAgent({"w","z"},2),BinaryAgent({"z","y"},3)], fairness_criterion=fairness_PROP1, name="Family 2")
>>> (bundle1,bundle2) = allocate([family1, family2], ["w","x","y","z"])
>>> sorted(bundle1)
['w']
>>> sorted(bundle2)
['x', 'y', 'z']
>>> (bundle1,bundle2) = allocate([family2, family1], ["x","w","y","z"])
>>> sorted(bundle1)
['y', 'z']
>>> sorted(bundle2)
['w', 'x']
"""
k = len(families)
if k==1:
family = families[0]
logger.info(" {} gets the remaining bundle".format(family.name))
return [set(goods)]
goods=list(goods) # order the goods on a line
left_sequence = list()
right_sequence = list(goods)
for good in goods:
logger.info("\nCurrent partition: {} | {}:".format(left_sequence,right_sequence))
left_bundle = set(left_sequence)
right_bundle = set(right_sequence)
for family_index in range(len(families)):
family = families[family_index]
num_of_happy_members = family.num_of_happy_members(left_bundle, [right_bundle])
logger.info(" {}: {}/{} members think the left bundle is {}".format(
family.name, num_of_happy_members, family.num_of_members, family.fairness_criterion.abbreviation))
if num_of_happy_members*k >= family.num_of_members:
logger.info(" {} gets the left bundle".format(family.name))
other_families = list(families)
del other_families[family_index]
bundles = allocate(other_families, right_sequence)
                bundles.insert(family_index, left_bundle)
return bundles
left_sequence.append(good)
right_sequence.pop(0)
raise AssertionError(
"No family is willing to accept the set of all goods - the fairness criteria are probably too strong")
if __name__ == "__main__":
import doctest
(failures,tests) = doctest.testmod(report=True)
print ("{} failures, {} tests".format(failures,tests))
| 40.955056
| 180
| 0.650754
|
ae777e908e15e2b59bc929c5b2c40d49a7d3379e
| 6,333
|
py
|
Python
|
server/models/campaign.py
|
LDeeJay1969/PlanarAlly
|
0e9e7a2a122948dd75d5570c09537c5e802c16d4
|
[
"MIT"
] | null | null | null |
server/models/campaign.py
|
LDeeJay1969/PlanarAlly
|
0e9e7a2a122948dd75d5570c09537c5e802c16d4
|
[
"MIT"
] | null | null | null |
server/models/campaign.py
|
LDeeJay1969/PlanarAlly
|
0e9e7a2a122948dd75d5570c09537c5e802c16d4
|
[
"MIT"
] | null | null | null |
import uuid
from peewee import (
fn,
BooleanField,
FloatField,
ForeignKeyField,
IntegerField,
TextField,
)
from playhouse.shortcuts import model_to_dict
from .base import BaseModel
from .user import User
from .utils import get_table
__all__ = [
"GridLayer",
"Layer",
"Location",
"LocationUserOption",
"Note",
"PlayerRoom",
"Room",
]
class Room(BaseModel):
name = TextField()
creator = ForeignKeyField(User, backref="rooms_created", on_delete="CASCADE")
invitation_code = TextField(default=uuid.uuid4, unique=True)
player_location = TextField(null=True)
dm_location = TextField(null=True)
is_locked = BooleanField(default=False)
def __repr__(self):
return f"<Room {self.get_path()}>"
def get_path(self):
return f"{self.creator.name}/{self.name}"
def get_active_location(self, dm):
if dm:
return Location.get(room=self, name=self.dm_location)
else:
return Location.get(room=self, name=self.player_location)
class Meta:
indexes = ((("name", "creator"), True),)
class PlayerRoom(BaseModel):
player = ForeignKeyField(User, backref="rooms_joined", on_delete="CASCADE")
room = ForeignKeyField(Room, backref="players", on_delete="CASCADE")
def __repr__(self):
return f"<PlayerRoom {self.room.get_path()} - {self.player.name}>"
class Location(BaseModel):
room = ForeignKeyField(Room, backref="locations", on_delete="CASCADE")
name = TextField()
unit_size = FloatField(default=5)
unit_size_unit = TextField(default="ft")
use_grid = BooleanField(default=True)
full_fow = BooleanField(default=False)
fow_opacity = FloatField(default=0.3)
fow_los = BooleanField(default=False)
vision_mode = TextField(default="triangle")
    # defaults are in feet: min 1640 ft (~0.5 km), max 3281 ft (~1 km)
vision_min_range = FloatField(default=1640)
vision_max_range = FloatField(default=3281)
def __repr__(self):
return f"<Location {self.get_path()}>"
def get_path(self):
return f"{self.room.get_path()}/{self.name}"
def as_dict(self):
return model_to_dict(self, recurse=False, exclude=[Location.id, Location.room])
def add_default_layers(self):
Layer.create(
location=self, name="map", type_="normal", player_visible=True, index=0
)
Layer.create(
location=self,
name="grid",
type_="grid",
selectable=False,
player_visible=True,
index=1,
)
Layer.create(
location=self,
name="tokens",
type_="normal",
player_visible=True,
player_editable=True,
index=2,
)
Layer.create(location=self, type_="normal", name="dm", index=3)
Layer.create(
location=self, type_="fow", name="fow", player_visible=True, index=4
)
Layer.create(
location=self,
name="fow-players",
type_="fow-players",
selectable=False,
player_visible=True,
index=5,
)
Layer.create(
location=self,
name="draw",
type_="normal",
selectable=False,
player_visible=True,
player_editable=True,
index=6,
)
class Meta:
indexes = ((("room", "name"), True),)
class Note(BaseModel):
uuid = TextField(primary_key=True)
room = ForeignKeyField(Room, backref="notes", on_delete="CASCADE")
location = ForeignKeyField(
Location, null=True, backref="notes", on_delete="CASCADE"
)
user = ForeignKeyField(User, backref="notes", on_delete="CASCADE")
title = TextField(null=True)
text = TextField(null=True)
def __repr__(self):
return f"<Note {self.title} {self.room.get_path()} - {self.user.name}"
def as_dict(self):
return model_to_dict(
self, recurse=False, exclude=[Note.room, Note.location, Note.user]
)
class Layer(BaseModel):
location = ForeignKeyField(Location, backref="layers", on_delete="CASCADE")
name = TextField()
type_ = TextField()
# TYPE = IntegerField() # normal/grid/dm/lighting ???????????
player_visible = BooleanField(default=False)
player_editable = BooleanField(default=False)
selectable = BooleanField(default=True)
index = IntegerField()
def __repr__(self):
return f"<Layer {self.get_path()}>"
def get_path(self):
return f"{self.location.get_path()}/{self.name}"
def as_dict(self, user: User, dm: bool):
from .shape import Shape
data = model_to_dict(
self, recurse=False, exclude=[Layer.id, Layer.player_visible]
)
data["shapes"] = [
shape.as_dict(user, dm) for shape in self.shapes.order_by(Shape.index)
]
if self.type_ == "grid":
type_table = get_table(f"{self.type_}layer")
data.update(
**model_to_dict(type_table.get(id=self.id), exclude=[type_table.id])
)
return data
class Meta:
indexes = ((("location", "name"), True), (("location", "index"), True))
class GridLayer(BaseModel):
size = FloatField(default=50)
layer = ForeignKeyField(Layer, on_delete="CASCADE")
class LocationUserOption(BaseModel):
location = ForeignKeyField(Location, backref="user_options", on_delete="CASCADE")
user = ForeignKeyField(User, backref="location_options", on_delete="CASCADE")
pan_x = IntegerField(default=0)
pan_y = IntegerField(default=0)
zoom_factor = FloatField(default=1.0)
active_layer = ForeignKeyField(Layer, backref="active_users", null=True)
def __repr__(self):
return f"<LocationUserOption {self.location.get_path()} - {self.user.name}>"
def as_dict(self):
d = model_to_dict(
self,
recurse=False,
exclude=[
LocationUserOption.id,
LocationUserOption.location,
LocationUserOption.user,
],
)
if self.active_layer:
d["active_layer"] = self.active_layer.name
return d
class Meta:
indexes = ((("location", "user"), True),)
| 29.455814
| 87
| 0.610769
|
02119c9520d60bfbfce8af1cab1b09fcb0776f32
| 8,385
|
py
|
Python
|
legal-api/tests/unit/resources/v2/test_business.py
|
vysakh-menon-aot/lear
|
7bae45efa2f9f89a7e826567c85de55fde68e09e
|
[
"Apache-2.0"
] | 8
|
2019-06-19T16:16:15.000Z
|
2021-08-28T23:56:40.000Z
|
legal-api/tests/unit/resources/v2/test_business.py
|
vysakh-menon-aot/lear
|
7bae45efa2f9f89a7e826567c85de55fde68e09e
|
[
"Apache-2.0"
] | 796
|
2019-03-07T19:25:50.000Z
|
2022-03-31T20:32:57.000Z
|
legal-api/tests/unit/resources/v2/test_business.py
|
vysakh-menon-aot/lear
|
7bae45efa2f9f89a7e826567c85de55fde68e09e
|
[
"Apache-2.0"
] | 82
|
2019-01-30T20:06:14.000Z
|
2022-03-29T20:38:31.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the business end-point.
Test-Suite to ensure that the /businesses endpoint is working as expected.
"""
import copy
from http import HTTPStatus
import registry_schemas
from registry_schemas.example_data import FILING_TEMPLATE, INCORPORATION
from legal_api.models import Filing
from legal_api.services.authz import STAFF_ROLE
from legal_api.utils.datetime import datetime
from tests import integration_affiliation
from tests.unit.services.utils import create_header
def factory_business_model(legal_name,
identifier,
founding_date,
last_ledger_timestamp,
last_modified,
fiscal_year_end_date=None,
tax_id=None,
dissolution_date=None):
"""Return a valid Business object stamped with the supplied designation."""
from legal_api.models import Business as BusinessModel
b = BusinessModel(legal_name=legal_name,
identifier=identifier,
founding_date=founding_date,
last_ledger_timestamp=last_ledger_timestamp,
last_modified=last_modified,
fiscal_year_end_date=fiscal_year_end_date,
dissolution_date=dissolution_date,
tax_id=tax_id
)
b.save()
return b
def test_create_bootstrap_failure_filing(client, jwt):
"""Assert the an empty filing cannot be used to bootstrap a filing."""
filing = None
rv = client.post('/api/v2/businesses?draft=true',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
@integration_affiliation
def test_create_bootstrap_minimal_draft_filing(client, jwt):
"""Assert that a minimal filing can be used to create a draft filing."""
filing = {'filing':
{
'header':
{
'name': 'incorporationApplication',
'accountId': 28
}
}
}
rv = client.post('/api/v2/businesses?draft=true',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
assert rv.json['filing']['business']['identifier']
assert rv.json['filing']['header']['accountId'] == 28
assert rv.json['filing']['header']['name'] == 'incorporationApplication'
@integration_affiliation
def test_create_bootstrap_validate_success_filing(client, jwt):
"""Assert that a valid IA can be validated."""
filing = copy.deepcopy(FILING_TEMPLATE)
filing['filing'].pop('business')
filing['filing']['incorporationApplication'] = copy.deepcopy(INCORPORATION)
filing['filing']['header']['name'] = 'incorporationApplication'
filing['filing']['header']['accountId'] = 28
# remove fed
filing['filing']['header'].pop('effectiveDate')
rv = client.post('/api/v2/businesses?only_validate=true',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.OK
assert rv.json['filing']['header']['accountId'] == 28
assert rv.json['filing']['header']['name'] == 'incorporationApplication'
@integration_affiliation
def test_create_incorporation_success_filing(client, jwt, session):
"""Assert that a valid IA can be posted."""
filing = copy.deepcopy(FILING_TEMPLATE)
filing['filing'].pop('business')
filing['filing']['incorporationApplication'] = copy.deepcopy(INCORPORATION)
filing['filing']['header']['name'] = 'incorporationApplication'
filing['filing']['header']['accountId'] = 28
filing['filing']['header']['routingSlipNumber'] = '111111111'
# remove fed
filing['filing']['header'].pop('effectiveDate')
rv = client.post('/api/v2/businesses',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
assert rv.json['filing']['header']['accountId'] == 28
assert rv.json['filing']['header']['name'] == 'incorporationApplication'
filing = Filing.get_filing_by_payment_token(rv.json['filing']['header']['paymentToken'])
assert filing
assert filing.status == Filing.Status.PENDING.value
def test_get_temp_business_info(session, client, jwt):
"""Assert that temp registration returns 200."""
identifier = 'T7654321'
rv = client.get('/api/v2/businesses/' + identifier,
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.OK
def test_get_business_info(session, client, jwt):
"""Assert that the business info can be received in a valid JSONSchema format."""
identifier = 'CP7654321'
legal_name = identifier + ' legal name'
factory_business_model(legal_name=legal_name,
identifier=identifier,
founding_date=datetime.utcfromtimestamp(0),
last_ledger_timestamp=datetime.utcfromtimestamp(0),
last_modified=datetime.utcfromtimestamp(0),
fiscal_year_end_date=None,
tax_id=None,
dissolution_date=None)
rv = client.get('/api/v2/businesses/' + identifier,
headers=create_header(jwt, [STAFF_ROLE], identifier))
print('business json', rv.json)
assert rv.json['business']['identifier'] == identifier
print('valid schema?', registry_schemas.validate(rv.json, 'business'))
assert registry_schemas.validate(rv.json, 'business')
def test_get_business_info_dissolution(session, client, jwt):
"""Assert that the business info cannot be received in a valid JSONSchema format."""
identifier = 'CP1234567'
legal_name = identifier + ' legal name'
factory_business_model(legal_name=legal_name,
identifier=identifier,
founding_date=datetime.utcfromtimestamp(0),
last_ledger_timestamp=datetime.utcfromtimestamp(0),
last_modified=datetime.utcfromtimestamp(0),
fiscal_year_end_date=None,
tax_id=None,
dissolution_date=datetime.utcfromtimestamp(0))
rv = client.get(f'/api/v2/businesses/{identifier}',
headers=create_header(jwt, [STAFF_ROLE], identifier))
    # a dissolved company can still be found; its dissolution date is exposed.
assert rv.status_code == 200
assert rv.json.get('business').get('dissolutionDate')
assert rv.json.get('business').get('identifier') == identifier
def test_get_business_info_missing_business(session, client, jwt):
"""Assert that the business info can be received in a valid JSONSchema format."""
factory_business_model(legal_name='legal_name',
identifier='CP7654321',
founding_date=datetime.utcfromtimestamp(0),
last_ledger_timestamp=datetime.utcfromtimestamp(0),
last_modified=datetime.utcfromtimestamp(0),
fiscal_year_end_date=None,
tax_id=None,
dissolution_date=None)
identifier = 'CP0000001'
rv = client.get(f'/api/v2/businesses/{identifier}',
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json == {'message': f'{identifier} not found'}
| 40.703883
| 92
| 0.632558
|
806a3514e93d4c68e3f540608d1b396cfb407959 | 5,691 bytes | py | Python | Starter_Files/lambda_function.py | getjiggy/unit13-challenge | 1de5f432c75eff22c43262a777f5e8875cea338e | ["Unlicense"]
### Required Libraries ###
from datetime import datetime
from dateutil.relativedelta import relativedelta
### Functionality Helper Functions ###
def parse_int(n):
"""
    Safely convert a value to an integer, returning NaN on failure.
"""
try:
return int(n)
except ValueError:
return float("nan")
def build_validation_result(is_valid, violated_slot, message_content):
"""
Define a result message structured as Lex response.
"""
if message_content is None:
return {"isValid": is_valid, "violatedSlot": violated_slot}
return {
"isValid": is_valid,
"violatedSlot": violated_slot,
"message": {"contentType": "PlainText", "content": message_content},
}
### Dialog Actions Helper Functions ###
def get_slots(intent_request):
"""
Fetch all the slots and their values from the current intent.
"""
return intent_request["currentIntent"]["slots"]
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
"""
Defines an elicit slot type response.
"""
return {
"sessionAttributes": session_attributes,
"dialogAction": {
"type": "ElicitSlot",
"intentName": intent_name,
"slots": slots,
"slotToElicit": slot_to_elicit,
"message": message,
},
}
def delegate(session_attributes, slots):
"""
Defines a delegate slot type response.
"""
return {
"sessionAttributes": session_attributes,
"dialogAction": {"type": "Delegate", "slots": slots},
}
def close(session_attributes, fulfillment_state, message):
"""
Defines a close slot type response.
"""
response = {
"sessionAttributes": session_attributes,
"dialogAction": {
"type": "Close",
"fulfillmentState": fulfillment_state,
"message": message,
},
}
return response
def validate_data(age, amount, intent_request):
    """Validate the age and investment amount slots supplied by the user."""
    # parse_int returns NaN for malformed input, and NaN fails both range
    # checks below, so non-numeric values are rejected as well.
    if age is not None and not 0 <= parse_int(age) <= 65:
        return build_validation_result(False, 'age', 'must be between 0 and 65')
    if amount is not None and not parse_int(amount) >= 5000:
        return build_validation_result(False, 'investmentAmount', 'must be higher than 5000')
    return build_validation_result(True, None, None)
### Intents Handlers ###
def recommend_portfolio(intent_request):
"""
Performs dialog management and fulfillment for recommending a portfolio.
"""
first_name = get_slots(intent_request)["firstName"]
age = get_slots(intent_request)["age"]
investment_amount = get_slots(intent_request)["investmentAmount"]
risk_level = get_slots(intent_request)["riskLevel"]
source = intent_request["invocationSource"]
if source == "DialogCodeHook":
# Perform basic validation on the supplied input slots.
# Use the elicitSlot dialog action to re-prompt
# for the first violation detected.
### YOUR DATA VALIDATION CODE STARTS HERE ###
        validation_result = validate_data(age, investment_amount, intent_request)
        if not validation_result['isValid']:
            slots = get_slots(intent_request)
            slots[validation_result['violatedSlot']] = None  # clear the invalid slot
            return elicit_slot(
                intent_request['sessionAttributes'],
                intent_request['currentIntent']['name'],
                slots,
                validation_result['violatedSlot'],
                validation_result['message'],
            )
        ### YOUR DATA VALIDATION CODE ENDS HERE ###
        # Fetch current session attributes
        output_session_attributes = intent_request["sessionAttributes"]
        return delegate(output_session_attributes, get_slots(intent_request))
# Get the initial investment recommendation
### YOUR FINAL INVESTMENT RECOMMENDATION CODE STARTS HERE ###
responseDict = {
"None": "100% bonds (AGG), 0% equities (SPY)",
"Low": "60% bonds (AGG), 40% equities (SPY)",
"Medium": "40% bonds (AGG), 60% equities (SPY)",
"High": "20% bonds (AGG), 80% equities (SPY)"
}
initial_recommendation = responseDict[risk_level]
### YOUR FINAL INVESTMENT RECOMMENDATION CODE ENDS HERE ###
# Return a message with the initial recommendation based on the risk level.
return close(
intent_request["sessionAttributes"],
"Fulfilled",
{
"contentType": "PlainText",
"content": """{} thank you for your information;
based on the risk level you defined, my recommendation is to choose an investment portfolio with {}
""".format(
first_name, initial_recommendation
),
},
)
### Intents Dispatcher ###
def dispatch(intent_request):
"""
Called when the user specifies an intent for this bot.
"""
intent_name = intent_request["currentIntent"]["name"]
# Dispatch to bot's intent handlers
if intent_name == "RecommendPortfolio":
return recommend_portfolio(intent_request)
raise Exception("Intent with name " + intent_name + " not supported")
### Main Handler ###
def lambda_handler(event, context):
"""
Route the incoming request based on intent.
The JSON body of the request is provided in the event slot.
"""
return dispatch(event)
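# A minimal local-invocation sketch, assuming the Lex V1 event shape this
# handler consumes; the slot values below are made up for illustration:
if __name__ == "__main__":
    sample_event = {
        "currentIntent": {
            "name": "RecommendPortfolio",
            "slots": {
                "firstName": "Ada",
                "age": "35",
                "investmentAmount": "10000",
                "riskLevel": "Low",
            },
        },
        "sessionAttributes": {},
        "invocationSource": "FulfillmentCodeHook",
    }
    # Prints a Close dialog action with the "60% bonds / 40% equities" text.
    print(lambda_handler(sample_event, None))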
09e0cb4f2dd25891f072f5ec9cd0375cc673ee73 | 7,467 bytes | py | Python | ac-dc-converters/semi-converter.py | Leandro-Bertoluzzi/pyspice-power-electronics | 960494b23e36c0fac61289744a64c991d62784a2 | ["MIT"]
#r# ============================================
#r# Semi-converter with SCR
#r# ============================================
#r# This example shows the simulation of a controlled semi-converter with SCRs and diodes
######################################### IMPORT MODULES #########################################
import matplotlib.pyplot as plt
import numpy as np
######################################### IMPORT UTILITIES #########################################
import sys
sys.path.insert(1, '../utilities/')
from utilities import format_output
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Doc.ExampleTools import find_libraries
from PySpice.Probe.Plot import plot
from PySpice.Spice.Library import SpiceLibrary
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
############################# LIBRARIES WITH DEFINITIONS OF COMPONENTS #############################
libraries_path = '../libraries'  # forward slashes are portable across platforms
spice_library = SpiceLibrary(libraries_path)
#####################################################################################################
# DEFINING PLOTS
#####################################################################################################
figure1, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 10))
figure2, (ax3, ax4) = plt.subplots(2, 1, figsize=(20, 10))
####################################################################################################
# CIRCUIT DEFINITION
####################################################################################################
circuit = Circuit('Semi-converter with SCR')
# Input voltage
source = circuit.SinusoidalVoltageSource('input', 'A', 'B', amplitude=220@u_V, frequency=50@u_Hz)
# SCR gate triggering signal
alpha = 0.5  # trigger (firing) angle, normalized to the half-period [0; 1]
delay_time1 = (source.period/2) * alpha
pulse_width = (source.period/2) * (1 - alpha)
circuit.PulseVoltageSource('trigger1', 'gate1', 'output', 0@u_V, 1@u_V, delay_time=delay_time1, pulse_width=pulse_width, period=source.period, rise_time=1@u_ms, fall_time=1@u_ms)
delay_time2 = (source.period/2) * alpha + source.period/2
circuit.PulseVoltageSource('trigger2', 'gate2', 'output', 0@u_V, 1@u_V, delay_time=delay_time2, pulse_width=pulse_width, period=source.period, rise_time=1@u_ms, fall_time=1@u_ms)
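# Worked numbers for the timing above: at 50 Hz the period is 20 ms, so with
# alpha = 0.5 trigger1 fires 5 ms into the cycle and trigger2 at 15 ms, each
# gate pulse lasting (20 ms / 2) * (1 - 0.5) = 5 ms and repeating every period.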
# Define the rectifier bridge
# SCRs
circuit.include(spice_library['EC103D1'])
circuit.X('t1', 'EC103D1', 'A', 'gate1', 'output')
circuit.X('t2', 'EC103D1', 'B', 'gate2', 'output')
# Diodes
circuit.include(spice_library['BAV21'])
circuit.X('d1', 'BAV21', circuit.gnd, 'A')
circuit.X('d2', 'BAV21', circuit.gnd, 'B')
# Flyback diode Dm
circuit.X('Dm', 'BAV21', circuit.gnd, 'output')
# Series resistor as load
circuit.R('load', 'output', circuit.gnd, 100@u_Ω)
# Show the netlist
print('**** Circuit netlist: ****')
print(circuit)
####################################################################################################
# SIMULATION
####################################################################################################
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.transient(step_time=source.period/5000, end_time=source.period*6)
# Formatting results
voltages, currents = format_output(analysis, 'transient')
v_source = np.subtract(voltages['a'], voltages['b'])
v_gate1 = voltages['gate1']
v_gate2 = voltages['gate2']
v_output = voltages['output']
t = voltages['time']
# Plot
ax1.set_title('Semi-converter with resistive load')
ax1.set_xlabel('Time [s]')
ax1.set_ylabel('Voltage [V]')
ax1.grid()
ax1.plot(t, v_source)
ax1.plot(t, v_gate1)
ax1.plot(t, v_gate2)
ax1.plot(t, v_output)
ax1.legend(('input', 'gate1', 'gate2', 'output'), loc=(.05,.1))
ax1.set_ylim(float(-source.amplitude*1.1), float(source.amplitude*1.1))
####################################################################################################
# CIRCUIT DEFINITION - FILTERED
####################################################################################################
# We add a capacitor to filter the output voltage
circuit.C('1', 'output', circuit.gnd, 1@u_mF)
# Show the netlist
print('**** Circuit netlist (with filter): ****')
print(circuit)
####################################################################################################
# SIMULATION
####################################################################################################
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.transient(step_time=source.period/200, end_time=source.period*6)
# Formatting results
voltages, currents = format_output(analysis, 'transient')
v_source = np.subtract(voltages['a'], voltages['b'])
v_gate1 = voltages['gate1']
v_gate2 = voltages['gate2']
v_output = voltages['output']
t = voltages['time']
# Plot
ax2.set_title('Semi-converter with resistive load -- Filtered')
ax2.set_xlabel('Time [s]')
ax2.set_ylabel('Voltage [V]')
ax2.grid()
ax2.plot(t, v_source)
ax2.plot(t, v_gate1)
ax2.plot(t, v_gate2)
ax2.plot(t, v_output)
ax2.legend(('input', 'gate1', 'gate2', 'output'), loc=(.05,.1))
ax2.set_ylim(float(-source.amplitude*1.1), float(source.amplitude*1.1))
####################################################################################################
# CIRCUIT DEFINITION - RL LOAD
####################################################################################################
# We remove the filter capacitor and the resistive load
circuit.C1.detach()
circuit.Rload.detach()
# We add the RL load
circuit.R('_load', 'output', 'RL', 2.5@u_Ω)
circuit.L('_load', 'RL', circuit.gnd, 7@u_mH)
# Show the netlist
print('**** Circuit netlist (with RL load): ****')
print(circuit)
####################################################################################################
# SIMULATION
####################################################################################################
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.transient(step_time=source.period/5000, end_time=source.period*6)
# Formatting results
voltages, currents = format_output(analysis, 'transient')
v_source = np.subtract(voltages['a'], voltages['b'])
v_gate1 = voltages['gate1']
v_gate2 = voltages['gate2']
v_output = voltages['output']
t = voltages['time']
i_load = currents['l_load']
# Voltages
ax3.set_title('Semi-converter with RL load')
ax3.set_xlabel('Time [s]')
ax3.set_ylabel('Voltage [V]')
ax3.grid()
ax3.plot(t, v_source)
ax3.plot(t, v_gate1)
ax3.plot(t, v_gate2)
ax3.plot(t, v_output)
ax3.legend(('input', 'gate1', 'gate2', 'output'), loc=(.05,.1))
ax3.set_ylim(float(-source.amplitude*1.1), float(source.amplitude*1.1))
# Current
max_current = i_load.max()
min_current = i_load.min()
ax4.set_title('Semi-converter with RL load - Current')
ax4.set_xlabel('Time [s]')
ax4.set_ylabel('Current [A]')
ax4.grid()
ax4.plot(t, i_load)
ax4.legend('Load current', loc=(.05,.1))
ax4.set_ylim(float(1.1 * min_current), float(1.1 * max_current))
####################################################################################################
# Adjusts the spacing between subplots
figure1.tight_layout(pad=3.0)
figure2.tight_layout(pad=3.0)
# Shows all plots
plt.show()
71ba815ae67b60a1d7b512aa428676f0e2fd71f0 | 30,781 bytes | py | Python | venv/Lib/site-packages/aiohttp/http_parser.py | pay2win-jpg/DRandomBot | c44eb937a1b1837bba2ce7fa24324ebad369437b | ["MIT"] | 21 stars (2021-03-29 to 2022-03-18) | 95 issues (2021-07-18 to 2022-02-17) | 31 forks (2021-03-29 to 2022-03-22)
import abc
import asyncio
import collections
import re
import string
import zlib
from enum import IntEnum
from typing import Any, List, Optional, Tuple, Type, Union
from multidict import CIMultiDict, CIMultiDictProxy, istr
from yarl import URL
from . import hdrs
from .base_protocol import BaseProtocol
from .helpers import NO_EXTENSIONS, BaseTimerContext
from .http_exceptions import (
BadStatusLine,
ContentEncodingError,
ContentLengthError,
InvalidHeader,
LineTooLong,
TransferEncodingError,
)
from .http_writer import HttpVersion, HttpVersion10
from .log import internal_logger
from .streams import EMPTY_PAYLOAD, StreamReader
from .typedefs import RawHeaders
try:
import brotli
HAS_BROTLI = True
except ImportError: # pragma: no cover
HAS_BROTLI = False
__all__ = (
"HeadersParser",
"HttpParser",
"HttpRequestParser",
"HttpResponseParser",
"RawRequestMessage",
"RawResponseMessage",
)
ASCIISET = set(string.printable)
# See https://tools.ietf.org/html/rfc7230#section-3.1.1
# and https://tools.ietf.org/html/rfc7230#appendix-B
#
# method = token
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
# "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
# token = 1*tchar
METHRE = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]+")
VERSRE = re.compile(r"HTTP/(\d+).(\d+)")
HDRRE = re.compile(rb"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
RawRequestMessage = collections.namedtuple(
"RawRequestMessage",
[
"method",
"path",
"version",
"headers",
"raw_headers",
"should_close",
"compression",
"upgrade",
"chunked",
"url",
],
)
RawResponseMessage = collections.namedtuple(
"RawResponseMessage",
[
"version",
"code",
"reason",
"headers",
"raw_headers",
"should_close",
"compression",
"upgrade",
"chunked",
],
)
class ParseState(IntEnum):
PARSE_NONE = 0
PARSE_LENGTH = 1
PARSE_CHUNKED = 2
PARSE_UNTIL_EOF = 3
class ChunkState(IntEnum):
PARSE_CHUNKED_SIZE = 0
PARSE_CHUNKED_CHUNK = 1
PARSE_CHUNKED_CHUNK_EOF = 2
PARSE_MAYBE_TRAILERS = 3
PARSE_TRAILERS = 4
class HeadersParser:
def __init__(
self,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
) -> None:
self.max_line_size = max_line_size
self.max_headers = max_headers
self.max_field_size = max_field_size
def parse_headers(
self, lines: List[bytes]
) -> Tuple["CIMultiDictProxy[str]", RawHeaders]:
headers = CIMultiDict() # type: CIMultiDict[str]
raw_headers = []
lines_idx = 1
line = lines[1]
line_count = len(lines)
while line:
# Parse initial header name : value pair.
try:
bname, bvalue = line.split(b":", 1)
except ValueError:
raise InvalidHeader(line) from None
bname = bname.strip(b" \t")
bvalue = bvalue.lstrip()
if HDRRE.search(bname):
raise InvalidHeader(bname)
if len(bname) > self.max_field_size:
raise LineTooLong(
"request header name {}".format(
bname.decode("utf8", "xmlcharrefreplace")
),
str(self.max_field_size),
str(len(bname)),
)
header_length = len(bvalue)
# next line
lines_idx += 1
line = lines[lines_idx]
# consume continuation lines
continuation = line and line[0] in (32, 9) # (' ', '\t')
if continuation:
bvalue_lst = [bvalue]
while continuation:
header_length += len(line)
if header_length > self.max_field_size:
raise LineTooLong(
"request header field {}".format(
bname.decode("utf8", "xmlcharrefreplace")
),
str(self.max_field_size),
str(header_length),
)
bvalue_lst.append(line)
# next line
lines_idx += 1
if lines_idx < line_count:
line = lines[lines_idx]
if line:
continuation = line[0] in (32, 9) # (' ', '\t')
else:
line = b""
break
bvalue = b"".join(bvalue_lst)
else:
if header_length > self.max_field_size:
raise LineTooLong(
"request header field {}".format(
bname.decode("utf8", "xmlcharrefreplace")
),
str(self.max_field_size),
str(header_length),
)
bvalue = bvalue.strip()
name = bname.decode("utf-8", "surrogateescape")
value = bvalue.decode("utf-8", "surrogateescape")
headers.add(name, value)
raw_headers.append((bname, bvalue))
return (CIMultiDictProxy(headers), tuple(raw_headers))
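# A hedged usage sketch for HeadersParser (the request bytes are illustrative):
#
#   lines = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n".split(b"\r\n")
#   headers, raw_headers = HeadersParser().parse_headers(lines)
#   headers["Host"]  # -> "example.com"
#
# Note that parse_headers() starts at lines[1]; lines[0] is the request/status
# line, which the message parsers below handle separately.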
class HttpParser(abc.ABC):
def __init__(
self,
protocol: Optional[BaseProtocol] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
limit: int = 2 ** 16,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
timer: Optional[BaseTimerContext] = None,
code: Optional[int] = None,
method: Optional[str] = None,
readall: bool = False,
payload_exception: Optional[Type[BaseException]] = None,
response_with_body: bool = True,
read_until_eof: bool = False,
auto_decompress: bool = True,
) -> None:
self.protocol = protocol
self.loop = loop
self.max_line_size = max_line_size
self.max_headers = max_headers
self.max_field_size = max_field_size
self.timer = timer
self.code = code
self.method = method
self.readall = readall
self.payload_exception = payload_exception
self.response_with_body = response_with_body
self.read_until_eof = read_until_eof
self._lines = [] # type: List[bytes]
self._tail = b""
self._upgraded = False
self._payload = None
self._payload_parser = None # type: Optional[HttpPayloadParser]
self._auto_decompress = auto_decompress
self._limit = limit
self._headers_parser = HeadersParser(max_line_size, max_headers, max_field_size)
@abc.abstractmethod
def parse_message(self, lines: List[bytes]) -> Any:
pass
def feed_eof(self) -> Any:
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
else:
# try to extract partial message
if self._tail:
self._lines.append(self._tail)
if self._lines:
                if self._lines[-1] != b"\r\n":
self._lines.append(b"")
try:
return self.parse_message(self._lines)
except Exception:
return None
def feed_data(
self,
data: bytes,
SEP: bytes = b"\r\n",
EMPTY: bytes = b"",
CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH,
METH_CONNECT: str = hdrs.METH_CONNECT,
SEC_WEBSOCKET_KEY1: istr = hdrs.SEC_WEBSOCKET_KEY1,
) -> Tuple[List[Any], bool, bytes]:
messages = []
if self._tail:
data, self._tail = self._tail + data, b""
data_len = len(data)
start_pos = 0
loop = self.loop
while start_pos < data_len:
# read HTTP message (request/response line + headers), \r\n\r\n
# and split by lines
if self._payload_parser is None and not self._upgraded:
pos = data.find(SEP, start_pos)
# consume \r\n
if pos == start_pos and not self._lines:
start_pos = pos + 2
continue
if pos >= start_pos:
# line found
self._lines.append(data[start_pos:pos])
start_pos = pos + 2
# \r\n\r\n found
if self._lines[-1] == EMPTY:
try:
msg = self.parse_message(self._lines)
finally:
self._lines.clear()
# payload length
length = msg.headers.get(CONTENT_LENGTH)
if length is not None:
try:
length = int(length)
except ValueError:
raise InvalidHeader(CONTENT_LENGTH)
if length < 0:
raise InvalidHeader(CONTENT_LENGTH)
# do not support old websocket spec
if SEC_WEBSOCKET_KEY1 in msg.headers:
raise InvalidHeader(SEC_WEBSOCKET_KEY1)
self._upgraded = msg.upgrade
method = getattr(msg, "method", self.method)
assert self.protocol is not None
# calculate payload
if (
(length is not None and length > 0)
or msg.chunked
and not msg.upgrade
):
payload = StreamReader(
self.protocol,
timer=self.timer,
loop=loop,
limit=self._limit,
)
payload_parser = HttpPayloadParser(
payload,
length=length,
chunked=msg.chunked,
method=method,
compression=msg.compression,
code=self.code,
readall=self.readall,
response_with_body=self.response_with_body,
auto_decompress=self._auto_decompress,
)
if not payload_parser.done:
self._payload_parser = payload_parser
elif method == METH_CONNECT:
payload = StreamReader(
self.protocol,
timer=self.timer,
loop=loop,
limit=self._limit,
)
self._upgraded = True
self._payload_parser = HttpPayloadParser(
payload,
method=msg.method,
compression=msg.compression,
readall=True,
auto_decompress=self._auto_decompress,
)
else:
if (
getattr(msg, "code", 100) >= 199
and length is None
and self.read_until_eof
):
payload = StreamReader(
self.protocol,
timer=self.timer,
loop=loop,
limit=self._limit,
)
payload_parser = HttpPayloadParser(
payload,
length=length,
chunked=msg.chunked,
method=method,
compression=msg.compression,
code=self.code,
readall=True,
response_with_body=self.response_with_body,
auto_decompress=self._auto_decompress,
)
if not payload_parser.done:
self._payload_parser = payload_parser
else:
payload = EMPTY_PAYLOAD # type: ignore
messages.append((msg, payload))
else:
self._tail = data[start_pos:]
data = EMPTY
break
# no parser, just store
elif self._payload_parser is None and self._upgraded:
assert not self._lines
break
# feed payload
elif data and start_pos < data_len:
assert not self._lines
assert self._payload_parser is not None
try:
eof, data = self._payload_parser.feed_data(data[start_pos:])
except BaseException as exc:
if self.payload_exception is not None:
self._payload_parser.payload.set_exception(
self.payload_exception(str(exc))
)
else:
self._payload_parser.payload.set_exception(exc)
eof = True
data = b""
if eof:
start_pos = 0
data_len = len(data)
self._payload_parser = None
continue
else:
break
if data and start_pos < data_len:
data = data[start_pos:]
else:
data = EMPTY
return messages, self._upgraded, data
def parse_headers(
self, lines: List[bytes]
) -> Tuple[
"CIMultiDictProxy[str]", RawHeaders, Optional[bool], Optional[str], bool, bool
]:
"""Parses RFC 5322 headers from a stream.
Line continuations are supported. Returns list of header name
and value pairs. Header name is in upper case.
"""
headers, raw_headers = self._headers_parser.parse_headers(lines)
close_conn = None
encoding = None
upgrade = False
chunked = False
# keep-alive
conn = headers.get(hdrs.CONNECTION)
if conn:
v = conn.lower()
if v == "close":
close_conn = True
elif v == "keep-alive":
close_conn = False
elif v == "upgrade":
upgrade = True
# encoding
enc = headers.get(hdrs.CONTENT_ENCODING)
if enc:
enc = enc.lower()
if enc in ("gzip", "deflate", "br"):
encoding = enc
# chunking
te = headers.get(hdrs.TRANSFER_ENCODING)
if te and "chunked" in te.lower():
chunked = True
return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
def set_upgraded(self, val: bool) -> None:
"""Set connection upgraded (to websocket) mode.
:param bool val: new state.
"""
self._upgraded = val
class HttpRequestParser(HttpParser):
"""Read request status line. Exception .http_exceptions.BadStatusLine
could be raised in case of any errors in status line.
Returns RawRequestMessage.
"""
def parse_message(self, lines: List[bytes]) -> Any:
# request line
line = lines[0].decode("utf-8", "surrogateescape")
try:
method, path, version = line.split(None, 2)
except ValueError:
raise BadStatusLine(line) from None
if len(path) > self.max_line_size:
raise LineTooLong(
"Status line is too long", str(self.max_line_size), str(len(path))
)
path_part, _hash_separator, url_fragment = path.partition("#")
path_part, _question_mark_separator, qs_part = path_part.partition("?")
# method
if not METHRE.match(method):
raise BadStatusLine(method)
# version
try:
if version.startswith("HTTP/"):
n1, n2 = version[5:].split(".", 1)
version_o = HttpVersion(int(n1), int(n2))
else:
raise BadStatusLine(version)
except Exception:
raise BadStatusLine(version)
# read headers
(
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
) = self.parse_headers(lines)
        if close is None:  # the Connection header wasn't set in the request
            if version_o <= HttpVersion10:  # HTTP/1.0 closes by default
                close = True
            else:  # HTTP/1.1 keeps the connection alive by default
                close = False
return RawRequestMessage(
method,
path,
version_o,
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
# NOTE: `yarl.URL.build()` is used to mimic what the Cython-based
# NOTE: parser does, otherwise it results into the same
# NOTE: HTTP Request-Line input producing different
# NOTE: `yarl.URL()` objects
URL.build(
path=path_part,
query_string=qs_part,
fragment=url_fragment,
encoded=True,
),
)
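# Illustrative contract of the request parser above: given the lines
# [b"GET /a?b=1 HTTP/1.1", b"Host: example.com", b""], parse_message() returns
# a RawRequestMessage whose .method is "GET", .version is HttpVersion(1, 1),
# and .url is URL.build(path="/a", query_string="b=1", encoded=True).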
class HttpResponseParser(HttpParser):
"""Read response status line and headers.
BadStatusLine could be raised in case of any errors in status line.
Returns RawResponseMessage"""
def parse_message(self, lines: List[bytes]) -> Any:
line = lines[0].decode("utf-8", "surrogateescape")
try:
version, status = line.split(None, 1)
except ValueError:
raise BadStatusLine(line) from None
try:
status, reason = status.split(None, 1)
except ValueError:
reason = ""
if len(reason) > self.max_line_size:
raise LineTooLong(
"Status line is too long", str(self.max_line_size), str(len(reason))
)
# version
match = VERSRE.match(version)
if match is None:
raise BadStatusLine(line)
version_o = HttpVersion(int(match.group(1)), int(match.group(2)))
# The status code is a three-digit number
try:
status_i = int(status)
except ValueError:
raise BadStatusLine(line) from None
if status_i > 999:
raise BadStatusLine(line)
# read headers
(
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
) = self.parse_headers(lines)
if close is None:
close = version_o <= HttpVersion10
return RawResponseMessage(
version_o,
status_i,
reason.strip(),
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
)
class HttpPayloadParser:
def __init__(
self,
payload: StreamReader,
length: Optional[int] = None,
chunked: bool = False,
compression: Optional[str] = None,
code: Optional[int] = None,
method: Optional[str] = None,
readall: bool = False,
response_with_body: bool = True,
auto_decompress: bool = True,
) -> None:
self._length = 0
self._type = ParseState.PARSE_NONE
self._chunk = ChunkState.PARSE_CHUNKED_SIZE
self._chunk_size = 0
self._chunk_tail = b""
self._auto_decompress = auto_decompress
self.done = False
# payload decompression wrapper
if response_with_body and compression and self._auto_decompress:
real_payload = DeflateBuffer(
payload, compression
) # type: Union[StreamReader, DeflateBuffer]
else:
real_payload = payload
# payload parser
if not response_with_body:
# don't parse payload if it's not expected to be received
self._type = ParseState.PARSE_NONE
real_payload.feed_eof()
self.done = True
elif chunked:
self._type = ParseState.PARSE_CHUNKED
elif length is not None:
self._type = ParseState.PARSE_LENGTH
self._length = length
if self._length == 0:
real_payload.feed_eof()
self.done = True
else:
if readall and code != 204:
self._type = ParseState.PARSE_UNTIL_EOF
elif method in ("PUT", "POST"):
internal_logger.warning( # pragma: no cover
"Content-Length or Transfer-Encoding header is required"
)
self._type = ParseState.PARSE_NONE
real_payload.feed_eof()
self.done = True
self.payload = real_payload
def feed_eof(self) -> None:
if self._type == ParseState.PARSE_UNTIL_EOF:
self.payload.feed_eof()
elif self._type == ParseState.PARSE_LENGTH:
raise ContentLengthError(
"Not enough data for satisfy content length header."
)
elif self._type == ParseState.PARSE_CHUNKED:
raise TransferEncodingError(
"Not enough data for satisfy transfer length header."
)
def feed_data(
self, chunk: bytes, SEP: bytes = b"\r\n", CHUNK_EXT: bytes = b";"
) -> Tuple[bool, bytes]:
# Read specified amount of bytes
if self._type == ParseState.PARSE_LENGTH:
required = self._length
chunk_len = len(chunk)
if required >= chunk_len:
self._length = required - chunk_len
self.payload.feed_data(chunk, chunk_len)
if self._length == 0:
self.payload.feed_eof()
return True, b""
else:
self._length = 0
self.payload.feed_data(chunk[:required], required)
self.payload.feed_eof()
return True, chunk[required:]
# Chunked transfer encoding parser
elif self._type == ParseState.PARSE_CHUNKED:
if self._chunk_tail:
chunk = self._chunk_tail + chunk
self._chunk_tail = b""
while chunk:
# read next chunk size
if self._chunk == ChunkState.PARSE_CHUNKED_SIZE:
pos = chunk.find(SEP)
if pos >= 0:
i = chunk.find(CHUNK_EXT, 0, pos)
if i >= 0:
size_b = chunk[:i] # strip chunk-extensions
else:
size_b = chunk[:pos]
try:
size = int(bytes(size_b), 16)
except ValueError:
exc = TransferEncodingError(
chunk[:pos].decode("ascii", "surrogateescape")
)
self.payload.set_exception(exc)
raise exc from None
chunk = chunk[pos + 2 :]
if size == 0: # eof marker
self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
else:
self._chunk = ChunkState.PARSE_CHUNKED_CHUNK
self._chunk_size = size
self.payload.begin_http_chunk_receiving()
else:
self._chunk_tail = chunk
return False, b""
# read chunk and feed buffer
if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK:
required = self._chunk_size
chunk_len = len(chunk)
if required > chunk_len:
self._chunk_size = required - chunk_len
self.payload.feed_data(chunk, chunk_len)
return False, b""
else:
self._chunk_size = 0
self.payload.feed_data(chunk[:required], required)
chunk = chunk[required:]
self._chunk = ChunkState.PARSE_CHUNKED_CHUNK_EOF
self.payload.end_http_chunk_receiving()
# toss the CRLF at the end of the chunk
if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK_EOF:
if chunk[:2] == SEP:
chunk = chunk[2:]
self._chunk = ChunkState.PARSE_CHUNKED_SIZE
else:
self._chunk_tail = chunk
return False, b""
                # if the stream does not contain trailers, after 0\r\n
                # we should get another \r\n; otherwise
                # trailers need to be skipped until \r\n\r\n
if self._chunk == ChunkState.PARSE_MAYBE_TRAILERS:
head = chunk[:2]
if head == SEP:
# end of stream
self.payload.feed_eof()
return True, chunk[2:]
# Both CR and LF, or only LF may not be received yet. It is
# expected that CRLF or LF will be shown at the very first
# byte next time, otherwise trailers should come. The last
# CRLF which marks the end of response might not be
# contained in the same TCP segment which delivered the
# size indicator.
if not head:
return False, b""
if head == SEP[:1]:
self._chunk_tail = head
return False, b""
self._chunk = ChunkState.PARSE_TRAILERS
# read and discard trailer up to the CRLF terminator
if self._chunk == ChunkState.PARSE_TRAILERS:
pos = chunk.find(SEP)
if pos >= 0:
chunk = chunk[pos + 2 :]
self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
else:
self._chunk_tail = chunk
return False, b""
# Read all bytes until eof
elif self._type == ParseState.PARSE_UNTIL_EOF:
self.payload.feed_data(chunk, len(chunk))
return False, b""
class DeflateBuffer:
"""DeflateStream decompress stream and feed data into specified stream."""
def __init__(self, out: StreamReader, encoding: Optional[str]) -> None:
self.out = out
self.size = 0
self.encoding = encoding
self._started_decoding = False
if encoding == "br":
if not HAS_BROTLI: # pragma: no cover
raise ContentEncodingError(
"Can not decode content-encoding: brotli (br). "
"Please install `brotlipy`"
)
self.decompressor = brotli.Decompressor()
else:
zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
self.decompressor = zlib.decompressobj(wbits=zlib_mode)
def set_exception(self, exc: BaseException) -> None:
self.out.set_exception(exc)
def feed_data(self, chunk: bytes, size: int) -> None:
if not size:
return
self.size += size
# RFC1950
# bits 0..3 = CM = 0b1000 = 8 = "deflate"
# bits 4..7 = CINFO = 1..7 = windows size.
if (
not self._started_decoding
and self.encoding == "deflate"
and chunk[0] & 0xF != 8
):
# Change the decoder to decompress incorrectly compressed data
# Actually we should issue a warning about non-RFC-compliant data.
self.decompressor = zlib.decompressobj(wbits=-zlib.MAX_WBITS)
try:
chunk = self.decompressor.decompress(chunk)
except Exception:
raise ContentEncodingError(
"Can not decode content-encoding: %s" % self.encoding
)
self._started_decoding = True
if chunk:
self.out.feed_data(chunk, len(chunk))
def feed_eof(self) -> None:
chunk = self.decompressor.flush()
if chunk or self.size > 0:
self.out.feed_data(chunk, len(chunk))
if self.encoding == "deflate" and not self.decompressor.eof:
raise ContentEncodingError("deflate")
self.out.feed_eof()
def begin_http_chunk_receiving(self) -> None:
self.out.begin_http_chunk_receiving()
def end_http_chunk_receiving(self) -> None:
self.out.end_http_chunk_receiving()
HttpRequestParserPy = HttpRequestParser
HttpResponseParserPy = HttpResponseParser
RawRequestMessagePy = RawRequestMessage
RawResponseMessagePy = RawResponseMessage
try:
if not NO_EXTENSIONS:
from ._http_parser import ( # type: ignore
HttpRequestParser,
HttpResponseParser,
RawRequestMessage,
RawResponseMessage,
)
HttpRequestParserC = HttpRequestParser
HttpResponseParserC = HttpResponseParser
RawRequestMessageC = RawRequestMessage
RawResponseMessageC = RawResponseMessage
except ImportError: # pragma: no cover
pass
e29a8707d45357f22f17d7040edf913b1e8006d8 | 4,042 bytes | py | Python | anthology/database.py | surfmikko/anthology | 96e5ac755c5c5db2f1c1ff2d6e0e1f8be459de97 | ["MIT"] | 3 issues (2017-04-23 to 2017-04-23)
"""MongoDB backend"""
from pymongo import MongoClient, ASCENDING
from bson import ObjectId
class DatabaseError(Exception):
"""Raised for unrecoverable database errors"""
pass
def connection():
"""Return MongoClient connection object.
pymongo.MongoClient has it's own instance caching/connection pool.
MongoMock however does not, so we will mockup this in tests and provide our
own instance cache.
"""
return MongoClient()
def db_songs():
"""Return songs collection"""
return connection().anthology.songs
def db_averages():
"""Return averages collection"""
return connection().anthology.averages
def get_songs_list(previous_id, limit, search_term=None, search_word=None):
"""Return songs from database. Parameters `previous_id` and `limit` are
used to iterate over result set.
Search is performed using parameter `search_term`. This performs search
on text index for documents. Index is always required and without it search
will fail.
:offset: Number of items to skip
:limit: Number of returned items
:search_term: Partial word search term
:search_word: Full word search term
:returns: Iterable cursor object
"""
query = [{}]
# Search for partial words
if search_term:
regex = {'$regex': search_term, '$options': 'i'}
query.append({'$or': [{'title': regex}, {'artist': regex}]})
# Search for full words
if search_word:
search = {'$text': {
'$search': search_word,
'$language': 'none',
'$caseSensitive': False,
'$diacriticSensitive': False
}}
query.append(search)
if previous_id:
query.append({'_id': {'$gt': ObjectId(previous_id)}})
return db_songs().find({'$and': query}).sort('_id', ASCENDING).limit(limit)
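# A hedged pagination sketch (the search term and page size are illustrative):
#
#   page = list(get_songs_list(None, 20, search_term='you'))
#   more = list(get_songs_list(str(page[-1]['_id']), 20, search_term='you'))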
def get_average_difficulty(level):
"""Return average difficulty for all songs on given level.
    If level is not given, return the average difficulty for all songs in the
    database.

    :level: Song level to search
    :returns: Dictionary with average difficulty and algorithm tag
"""
collection = db_songs()
pipeline = [
{"$group": {
"_id": None,
"average_difficulty": {"$avg": "$difficulty"}
}}]
if level:
pipeline.insert(0, {"$match": {"level": level}})
results = collection.aggregate(pipeline)
try:
result = results.next()
result["algorithm"] = 'trivial'
return result
except StopIteration:
return {}
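# On success the returned dictionary looks like, e.g.:
#   {'_id': None, 'average_difficulty': 9.1, 'algorithm': 'trivial'}
# (the values here are illustrative).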
def filter_songs_by_level(songs, level):
    """Yield songs matching the given level; yield all songs if level is None."""
    for song in songs:
        if level is None or level == song["level"]:
            yield song
def get_average_difficulty_fun(level):
"""Just for fun implementation for averages.
    Most of the data was already batch-processed beforehand, so we can
    calculate the rest efficiently in Python.

    If level is not given, return the average difficulty for all songs in the
    database.

    :level: Song level to search
    :returns: Dictionary with level and average difficulty
"""
totals = db_averages().find()
total_difficulty = 0
number_of_songs = 0
for total in filter_songs_by_level(totals, level):
# BUGBUG: This will overflow with big dataset
total_difficulty += total["total_difficulty"]
number_of_songs += total["number_of_songs"]
if number_of_songs == 0:
return {}
average_difficulty = total_difficulty / float(number_of_songs)
return {
'level': level,
'average_difficulty': average_difficulty,
'algorithm': 'fun'}
def get_song(song_id):
"""Return song with given id"""
return db_songs().find_one({'_id': ObjectId(song_id)})
def update_song(song_id, fields):
"""Return song with given id"""
db_songs().update_one(
{'_id': ObjectId(song_id)},
{'$set': fields})
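# A hedged usage sketch (the ObjectId string is purely illustrative):
#
#   update_song('5d0c1c5e8f1b2a3c4d5e6f70', {'difficulty': 9})
#   get_song('5d0c1c5e8f1b2a3c4d5e6f70')  # -> the updated document, or None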
e9a7829d4b1c746d18ec01fb857b9f676f62f6f1 | 8,780 bytes | py | Python | evaluation/privacy_entropy.py | DeqingFu/robustdg | 42519d42d80890a636b0d3fcb78e7d45561d0fb5 | ["MIT"]
#General Imports
import sys
import numpy as np
import pandas as pd
import argparse
import copy
import random
import json
import pickle
#PyTorch
import torch
from torch.autograd import grad
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import torch.utils.data as data_utils
#Tensorflow
from absl import flags
import tensorflow as tf
from tensorflow.keras import layers
#Sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from mia.estimators import ShadowModelBundle, AttackModelBundle, prepare_attack_data
from .base_eval import BaseEval
from utils.privacy_attack import to_onehot, mia
class PrivacyEntropy(BaseEval):
def __init__(self, args, train_dataset, val_dataset, test_dataset, base_res_dir, run, cuda):
super().__init__(args, train_dataset, val_dataset, test_dataset, base_res_dir, run, cuda)
def get_label_logits(self):
#Train Environment Data
train_data={}
train_data['logits']=[]
train_data['labels']=[]
for batch_idx, (x_e, y_e ,d_e, idx_e) in enumerate(self.train_dataset['data_loader']):
#Random Shuffling along the batch axis
rand_indices= torch.randperm(x_e.size()[0])
x_e= x_e[rand_indices]
y_e= y_e[rand_indices]
with torch.no_grad():
x_e= x_e.to(self.cuda)
y_e= y_e.to(self.cuda)
out= F.softmax(self.forward(x_e), dim=1)
train_data['logits'].append(out)
train_data['labels'].append(y_e)
train_data['logits']= torch.cat(train_data['logits'], dim=0)
train_data['labels']= torch.cat(train_data['labels'], dim=0)
#Test Environment Data
test_data={}
test_data['logits']=[]
test_data['labels']=[]
for batch_idx, (x_e, y_e ,d_e, idx_e) in enumerate(self.test_dataset['data_loader']):
#Random Shuffling along the batch axis
rand_indices= torch.randperm(x_e.size()[0])
x_e= x_e[rand_indices]
y_e= y_e[rand_indices]
with torch.no_grad():
x_e= x_e.to(self.cuda)
y_e= y_e.to(self.cuda)
out= F.softmax(self.forward(x_e), dim=1)
test_data['logits'].append(out)
test_data['labels'].append(y_e)
test_data['logits']= torch.cat(test_data['logits'], dim=0)
test_data['labels']= torch.cat(test_data['labels'], dim=0)
print('Train Logits: ', train_data['logits'].shape, 'Train Labels: ', train_data['labels'].shape )
print('Test Logits: ', test_data['logits'].shape, 'Test Labels: ', test_data['labels'].shape )
return train_data, test_data
def create_attack_data(self, train_data, test_data, sample_size, case='train'):
if case == 'train':
train_logits= train_data['logits'][:sample_size]
train_labels= train_data['labels'][:sample_size]
test_logits= test_data['logits'][:sample_size]
test_labels= test_data['labels'][:sample_size]
elif case == 'test':
train_logits= train_data['logits'][-1-sample_size:-1]
train_labels= train_data['labels'][-1-sample_size:-1]
test_logits= test_data['logits'][-1-sample_size:-1]
test_labels= test_data['labels'][-1-sample_size:-1]
attack_data={}
attack_data['logits']= torch.cat( (train_logits, test_logits), dim=0 )
attack_data['labels']= torch.cat( (train_labels, test_labels), dim=0 )
attack_data['members']= torch.cat( (torch.ones((sample_size,1)), torch.zeros((sample_size,1))), dim=0).to(self.cuda)
print(case, attack_data['logits'].shape, attack_data['labels'].shape, attack_data['members'].shape)
return attack_data
def eval_entropy_attack(self, data, threshold_data, scale=1.0, case='train'):
class_labels= torch.argmax(data['labels'], dim=1)
acc=0.0
size=0
        # Get class thresholds using only members of the train data
if case == 'train':
for y_c in range(self.args.out_classes):
indices= class_labels == y_c
logits= data['logits'][indices]
labels= data['labels'][indices]
members= data['members'][indices]
members= members.view(members.shape[0])
indices= members == 1
logits= logits[indices]
labels= labels[indices]
members= members[indices]
# print('Attack Logits and Labels', logits.shape, labels.shape)
F_y= torch.sum( logits*labels, dim=1)
F_i= logits*(1.0-labels)
# print('Label Check: ', labels[0], 1.0 - labels[0])
# print('F_y, F_i', F_y.shape, F_i.shape)
# print('Neg term: ', (F_i*torch.log(1.0-F_i)).shape, F_i[0])
metric= -1*(1.0 - F_y)*torch.log(F_y) -1*torch.sum( F_i*torch.log(1.0-F_i), dim=1 )
threshold_data[y_c]= torch.max(metric)
print('Label: ', y_c, threshold_data[y_c])
mem_predict= 1.0*(metric < threshold_data[y_c])
acc= torch.sum( mem_predict == members ).item()
size= mem_predict.shape[0]
print('Accuracy: ', mem_predict[0], members[0], (mem_predict == members)[0], 100*acc/size )
return
# Evaluate Membership Accuracy
for y_c in range(self.args.out_classes):
indices= class_labels == y_c
logits= data['logits'][indices]
labels= data['labels'][indices]
members= data['members'][indices]
members= members.view(members.shape[0])
F_y= torch.sum( logits*labels, dim=1)
F_i= logits*(1.0-labels)
metric= -1*(1.0 - F_y)*torch.log(F_y) -1*torch.sum( F_i*torch.log(1.0-F_i), dim=1 )
mem_predict= 1.0*(metric < (threshold_data[y_c]/scale))
acc+= torch.sum( mem_predict == members ).item()
size+= mem_predict.shape[0]
print('Accuracy: ', case, 100*acc/size)
return 100*acc/size
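    # Note on the metric above: for a one-hot label, F_y is the confidence on
    # the true class and F_i the confidences on the wrong classes, so
    #     metric = -(1 - F_y) * log(F_y) - sum_i F_i * log(1 - F_i)
    # behaves like a modified prediction entropy: confident, correct
    # predictions (typical of training members) score near zero, so samples
    # whose metric falls below the per-class threshold are flagged as members.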
def get_metric_eval(self):
'''
Train Size: 2*sample_size
Test Size: 2*sample_size
'''
final_res={}
acc_train=[]
acc_test=[]
precision=[]
recall=[]
sample_size= self.args.mia_sample_size
# Create Attack Model train and test dataset
train_data, test_data= self.get_label_logits()
train_attack_data= self.create_attack_data(train_data, test_data, sample_size, 'train')
test_attack_data= self.create_attack_data(train_data, test_data, sample_size, 'test')
threshold_data={}
for y_c in range(self.args.out_classes):
threshold_data[y_c]=0
self.eval_entropy_attack(train_attack_data, threshold_data, case='train')
max_train_acc=0.0
max_scale= -1
lim_scale= max(threshold_data.values())
if lim_scale <= 1:
lim_scale = 10
else:
lim_scale =int(lim_scale)
print('Upper Limit on Scale: ', lim_scale)
for scale in np.random.randint(1, lim_scale, 10):
train_metric= self.eval_entropy_attack(train_attack_data, threshold_data, scale= scale, case= 'test')
print('Scale: ', scale, ' Acc: ', train_metric)
if train_metric > max_train_acc:
max_train_acc= train_metric
max_scale= scale
print('Max Scale: ', max_scale, 'Max Acc: ', max_train_acc)
print('Threshold after training')
for y_c in range(self.args.out_classes):
print( 'Label : ', y_c, threshold_data[y_c]/max_scale )
test_metric= self.eval_entropy_attack(test_attack_data, threshold_data, scale= max_scale, case= 'test')
print('\nTrain Attack accuracy: ', max_train_acc)
print('\nTest Attack accuracy: ', test_metric)
        self.metric_score['train_acc'] = max_train_acc
self.metric_score['test_acc']= test_metric
return
db3711689267f392b79138748e937c1c6564cf40 | 59,279 bytes | py | Python | leo/core/leoMenu.py | drmikecrowe/leo-editor | 16d8e1e8564799496c7a90c5d3cc85461deca7e7 | ["MIT"]
#@+leo-ver=5-thin
#@+node:ekr.20031218072017.3749: * @file leoMenu.py
"""Gui-independent menu handling for Leo."""
import leo.core.leoGlobals as g
import sys
#@+others
#@+node:ekr.20031218072017.3750: ** class LeoMenu
class LeoMenu(object):
"""The base class for all Leo menus."""
#@+others
#@+node:ekr.20120124042346.12938: *3* LeoMenu.Birth
def __init__(self, frame):
self.c = frame.c
self.enable_dict = {} # Created by finishCreate.
self.frame = frame
self.isNull = False
self.menus = {} # Menu dictionary.
self.menuShortcuts = {}
def finishCreate(self):
self.define_enable_dict()
#@+node:ekr.20120124042346.12937: *4* LeoMenu.define_enable_table
#@@nobeautify
def define_enable_dict (self):
# pylint: disable=unnecessary-lambda
# The lambdas *are* necessary.
c = self.c
if not c.commandsDict:
return # This is not an error: it happens during init.
self.enable_dict = d = {
# File menu...
# 'revert': True, # Revert is always enabled.
# 'open-with': True, # Open-With is always enabled.
# Edit menu...
'undo': c.undoer.canUndo,
'redo': c.undoer.canRedo,
'extract-names': c.canExtractSectionNames,
'extract': c.canExtract,
'match-brackets': c.canFindMatchingBracket,
# Top-level Outline menu...
'cut-node': c.canCutOutline,
'delete-node': c.canDeleteHeadline,
'paste-node': c.canPasteOutline,
'paste-retaining-clones': c.canPasteOutline,
'clone-node': c.canClone,
'sort-siblings': c.canSortSiblings,
'hoist': c.canHoist,
'de-hoist': c.canDehoist,
# Outline:Expand/Contract menu...
'contract-parent': c.canContractParent,
'contract-node': lambda: c.p.hasChildren() and c.p.isExpanded(),
'contract-or-go-left': lambda: c.p.hasChildren() and c.p.isExpanded() or c.p.hasParent(),
'expand-node': lambda: c.p.hasChildren() and not c.p.isExpanded(),
'expand-prev-level': lambda: c.p.hasChildren() and c.p.isExpanded(),
'expand-next-level': lambda: c.p.hasChildren(),
'expand-to-level-1': lambda: c.p.hasChildren() and c.p.isExpanded(),
'expand-or-go-right': lambda: c.p.hasChildren(),
# Outline:Move menu...
'move-outline-down': lambda: c.canMoveOutlineDown(),
'move-outline-left': lambda: c.canMoveOutlineLeft(),
'move-outline-right': lambda: c.canMoveOutlineRight(),
'move-outline-up': lambda: c.canMoveOutlineUp(),
'promote': lambda: c.canPromote(),
'demote': lambda: c.canDemote(),
# Outline:Go To menu...
'goto-prev-history-node': lambda: c.nodeHistory.canGoToPrevVisited(),
'goto-next-history-node': lambda: c.nodeHistory.canGoToNextVisited(),
'goto-prev-visible': lambda: c.canSelectVisBack(),
'goto-next-visible': lambda: c.canSelectVisNext(),
# These are too slow...
# 'go-to-next-marked': c.canGoToNextMarkedHeadline,
# 'go-to-next-changed': c.canGoToNextDirtyHeadline,
'goto-next-clone': lambda: c.p.isCloned(),
'goto-prev-node': lambda: c.canSelectThreadBack(),
'goto-next-node': lambda: c.canSelectThreadNext(),
'goto-parent': lambda: c.p.hasParent(),
'goto-prev-sibling': lambda: c.p.hasBack(),
'goto-next-sibling': lambda: c.p.hasNext(),
# Outline:Mark menu...
'mark-subheads': lambda: c.p.hasChildren(),
# too slow...
# 'mark-changed-items': c.canMarkChangedHeadlines,
}
for i in range(1,9):
d ['expand-to-level-%s' % (i)] = lambda: c.p.hasChildren()
if 0: # Initial testing.
commandKeys = list(c.commandsDict.keys())
for key in sorted(d.keys()):
if key not in commandKeys:
g.trace('*** bad entry for %s' % (key))
#@+node:ekr.20031218072017.3775: *3* error and oops
def oops(self):
g.pr("LeoMenu oops:", g.callers(4), "should be overridden in subclass")
def error(self, s):
g.error('', s)
#@+node:ekr.20031218072017.3781: *3* Gui-independent menu routines
#@+node:ekr.20060926213642: *4* capitalizeMinibufferMenuName
#@@nobeautify
def capitalizeMinibufferMenuName(self, s, removeHyphens):
result = []
for i, ch in enumerate(s):
prev = s[i - 1] if i > 0 else ''
prevprev = s[i - 2] if i > 1 else ''
if (
i == 0 or
i == 1 and prev == '&' or
prev == '-' or
prev == '&' and prevprev == '-'
):
result.append(ch.capitalize())
elif removeHyphens and ch == '-':
result.append(' ')
else:
result.append(ch)
return ''.join(result)
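    # For example, capitalizeMinibufferMenuName('sort-&siblings', True)
    # returns 'Sort &Siblings': hyphens become spaces, and the letters that
    # start words (including the one guarded by '&') are capitalized.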
#@+node:ekr.20031218072017.3785: *4* createMenusFromTables & helpers
def createMenusFromTables(self):
c = self.c
aList = c.config.getMenusList()
if aList:
self.createMenusFromConfigList(aList)
else:
self.defineMenuTables()
self.createFileMenuFromTable()
self.createEditMenuFromTable()
self.createOutlineMenuFromTable()
g.doHook("create-optional-menus", c=c)
self.createCmndsMenuFromTable()
self.createWindowMenuFromTable()
self.createHelpMenuFromTable()
#@+node:ekr.20031218072017.3790: *5* createFileMenuFromTable
def createFileMenuFromTable(self):
c = self.c
fileMenu = self.createNewMenu("&File")
self.createMenuEntries(fileMenu, self.fileMenuTopTable)
self.createNewMenu("Open &With...", "File")
self.createMenuEntries(fileMenu, self.fileMenuTop2Table)
#@+<< create the recent files submenu >>
#@+node:ekr.20031218072017.3791: *6* << create the recent files submenu >>
self.createNewMenu("Recent &Files...", "File")
if 0: # Not needed, and causes problems in wxWindows...
g.app.recentFilesManager.createRecentFilesMenuItems(c)
#@-<< create the recent files submenu >>
self.add_separator(fileMenu)
#@+<< create the read/write submenu >>
#@+node:ekr.20031218072017.3792: *6* << create the read/write submenu >>
readWriteMenu = self.createNewMenu("&Read/Write...", "File")
self.createMenuEntries(readWriteMenu, self.fileMenuReadWriteMenuTable)
#@-<< create the read/write submenu >>
#@+<< create the tangle submenu >>
#@+node:ekr.20031218072017.3793: *6* << create the tangle submenu >>
tangleMenu = self.createNewMenu("Tan&gle...", "File")
self.createMenuEntries(tangleMenu, self.fileMenuTangleMenuTable)
#@-<< create the tangle submenu >>
#@+<< create the untangle submenu >>
#@+node:ekr.20031218072017.3794: *6* << create the untangle submenu >>
untangleMenu = self.createNewMenu("&Untangle...", "File")
self.createMenuEntries(untangleMenu, self.fileMenuUntangleMenuTable)
#@-<< create the untangle submenu >>
#@+<< create the import submenu >>
#@+node:ekr.20031218072017.3795: *6* << create the import submenu >>
importMenu = self.createNewMenu("&Import...", "File")
self.createMenuEntries(importMenu, self.fileMenuImportMenuTable)
#@-<< create the import submenu >>
#@+<< create the export submenu >>
#@+node:ekr.20031218072017.3796: *6* << create the export submenu >>
exportMenu = self.createNewMenu("&Export...", "File")
self.createMenuEntries(exportMenu, self.fileMenuExportMenuTable)
#@-<< create the export submenu >>
self.add_separator(fileMenu)
self.createMenuEntries(fileMenu, self.fileMenuTop3MenuTable)
#@+node:ekr.20031218072017.3786: *5* createEditMenuFromTable
def createEditMenuFromTable(self):
editMenu = self.createNewMenu("&Edit")
self.createMenuEntries(editMenu, self.editMenuTopTable)
#@+<< create the edit body submenu >>
#@+node:ekr.20031218072017.3787: *6* << create the edit body submenu >>
editBodyMenu = self.createNewMenu("Edit &Body...", "Edit")
self.createMenuEntries(editBodyMenu, self.editMenuEditBodyTable)
#@-<< create the edit body submenu >>
#@+<< create the edit headline submenu >>
#@+node:ekr.20031218072017.3788: *6* << create the edit headline submenu >>
editHeadlineMenu = self.createNewMenu("Edit &Headline...", "Edit")
self.createMenuEntries(editHeadlineMenu, self.editMenuEditHeadlineTable)
#@-<< create the edit headline submenu >>
#@+<< create the find submenu >>
#@+node:ekr.20031218072017.3789: *6* << create the find submenu >>
findMenu = self.createNewMenu("&Find...", "Edit")
self.createMenuEntries(findMenu, self.editMenuFindMenuTable)
#@-<< create the find submenu >>
self.createMenuEntries(editMenu, self.editMenuTop2Table)
#@+node:ekr.20031218072017.3797: *5* createOutlineMenuFromTable
def createOutlineMenuFromTable(self):
outlineMenu = self.createNewMenu("&Outline")
self.createMenuEntries(outlineMenu, self.outlineMenuTopMenuTable)
#@+<< create check submenu >>
#@+node:ekr.20040711140738.1: *6* << create check submenu >>
checkOutlineMenu = self.createNewMenu("Chec&k...", "Outline")
self.createMenuEntries(checkOutlineMenu, self.outlineMenuCheckOutlineMenuTable)
#@-<< create check submenu >>
#@+<< create expand/contract submenu >>
#@+node:ekr.20031218072017.3798: *6* << create expand/contract submenu >>
expandMenu = self.createNewMenu("E&xpand/Contract...", "Outline")
self.createMenuEntries(expandMenu, self.outlineMenuExpandContractMenuTable)
#@-<< create expand/contract submenu >>
#@+<< create move submenu >>
#@+node:ekr.20031218072017.3799: *6* << create move submenu >>
moveSelectMenu = self.createNewMenu("&Move...", "Outline")
self.createMenuEntries(moveSelectMenu, self.outlineMenuMoveMenuTable)
#@-<< create move submenu >>
#@+<< create mark submenu >>
#@+node:ekr.20031218072017.3800: *6* << create mark submenu >>
markMenu = self.createNewMenu("M&ark/Unmark...", "Outline")
self.createMenuEntries(markMenu, self.outlineMenuMarkMenuTable)
#@-<< create mark submenu >>
#@+<< create goto submenu >>
#@+node:ekr.20031218072017.3801: *6* << create goto submenu >>
gotoMenu = self.createNewMenu("&Go To...", "Outline")
self.createMenuEntries(gotoMenu, self.outlineMenuGoToMenuTable)
#@-<< create goto submenu >>
#@+node:ekr.20050921103736: *5* createCmndsMenuFromTable
def createCmndsMenuFromTable(self):
self.createNewMenu('&Cmds')
for name, table in (
# &: a,b,c,d,f,g,h,i,m,n,o,p,r,s,t,u
('&Abbrev...', self.cmdsMenuAbbrevTable),
('Body E&ditors', self.cmdsMenuBodyEditorsTable),
('&Buffers...', self.cmdsMenuBuffersTable),
('&Chapters...', self.cmdsMenuChaptersTable),
('C&ursor/Selection...', []),
('&Focus...', self.cmdsMenuFocusTable),
('&Macro...', self.cmdsMenuMacroTable),
('M&inibuffer', self.cmdsMenuMinibufferTable),
#('&Panes...', self.cmdsMenuPanesTable),
('&Pickers...', self.cmdsMenuPickersTable),
('&Rectangles...', self.cmdsMenuRectanglesTable),
('Re&gisters...', self.cmdsMenuRegistersTable),
('R&un Script/Tests', self.cmdsMenuRunTable),
('Scr&olling...', self.cmdsMenuScrollTable),
('Spell C&heck...', self.cmdsMenuSpellCheckTable),
('&Text Commands', self.cmdsMenuTextTable),
('Toggle Setti&ngs', self.cmdsMenuToggleTable),
):
if table == self.cmdsMenuChaptersTable and not self.c.chapterController:
continue
menu = self.createNewMenu(name, '&Cmds')
self.createMenuEntries(menu, table)
for name, table in (
# &: b,e,f,s,t,x
('Cursor &Back...', self.cursorMenuBackTable),
('Cursor Back &Extend Selection...', self.cursorMeuuBackExtendTable),
('Cursor Extend &To...', self.cursorMenuExtendTable),
('Cursor &Forward...', self.cursorMenuForwardTable),
('Cursor Forward E&xtend Selection...', self.cursorMenuForwardExtendTable),
):
menu = self.createNewMenu(name, 'C&ursor/Selection...')
self.createMenuEntries(menu, table)
#@+node:ekr.20031218072017.3802: *5* createWindowMenuFromTable
def createWindowMenuFromTable(self):
windowMenu = self.createNewMenu("&Window")
self.createMenuEntries(windowMenu, self.windowMenuTopTable)
#@+node:ekr.20031218072017.3803: *5* createHelpMenuFromTable
def createHelpMenuFromTable(self):
if sys.platform == 'darwin':
pass # self.getMacHelpMenu(table)
else:
helpMenu = self.createNewMenu("&Help")
self.createMenuEntries(helpMenu, self.helpMenuTable)
#@+node:ekr.20070926135612: *5* createMenusFromConfigList & helpers (LeoMenu)
def createMenusFromConfigList(self, aList):
'''Create menus from aList instead of 'hard coded' menus.
The 'top' menu has already been created.'''
c = self.c
tag = '@menu'
for z in aList:
kind, val, val2 = z
if kind.startswith(tag):
name = kind[len(tag):].strip()
if not self.handleSpecialMenus(name, parentName=None):
# Fix #528: Don't create duplicate menu items.
menu = self.createNewMenu(name) # Create top-level menu.
if menu:
self.createMenuFromConfigList(name, val, level=0)
else:
self.error('%s %s not valid outside @menu tree' % (kind, val))
aList = c.config.getOpenWith()
if aList:
# a list of dicts.
self.createOpenWithMenuFromTable(aList)
#@+node:ekr.20070927082205: *6* createMenuFromConfigList
def createMenuFromConfigList(self, parentName, aList, level=0):
"""Build menu based on nested list
List entries are either:
['@item', 'command-name', 'optional-view-name']
or:
['@menu Submenu name', <nested list>, None]
:param str parentName: name of menu under which to place this one
:param list aList: list of entries as described above
"""
table = []; parentMenu = self.getMenu(parentName)
for z in aList:
kind, val, val2 = z
if kind.startswith('@menu'):
# Menu names can be unicode without any problem.
name = kind[5:].strip()
if table:
self.createMenuEntries(parentMenu, table)
if not self.handleSpecialMenus(name, parentName, table):
menu = self.createNewMenu(name, parentName) # Create submenu of parent menu.
if menu: # Partial fix for #528.
self.createMenuFromConfigList(name, val, level + 1)
table = []
elif kind == '@item':
name = str(val) # Item names must always be ascii.
if val2:
# Translated names can be unicode.
table.append((val2, name),)
else:
table.append(name)
else:
g.trace('can not happen: bad kind:', kind)
if table:
self.createMenuEntries(parentMenu, table)
#@+node:ekr.20070927172712: *6* handleSpecialMenus
def handleSpecialMenus(self, name, parentName, table=None):
'''Handle a special menu if name is the name of a special menu.
return True if this method handles the menu.'''
c = self.c
if table is None: table = []
name2 = name.replace('&', '').replace(' ', '').lower()
if name2 == 'plugins':
# Create the plugins menu using a hook.
g.doHook("create-optional-menus", c=c)
return True
elif name2.startswith('recentfiles'):
# Just create the menu.
# createRecentFilesMenuItems will create the contents later.
self.createNewMenu(name, parentName)
return True
elif name2 == 'help' and sys.platform == 'darwin':
helpMenu = self.getMacHelpMenu(table)
return helpMenu is not None
else:
return False
#@+node:ekr.20031218072017.3752: *4* defineMenuTables & helpers
def defineMenuTables(self):
self.defineEditMenuTables()
self.defineFileMenuTables()
self.defineOutlineMenuTables()
self.defineWindowMenuTables()
self.defineCmdsMenuTables()
self.defineHelpMenuTables()
#@+node:ekr.20031218072017.3753: *5* defineEditMenuTables & helpers
def defineEditMenuTables(self):
self.defineEditMenuTopTable()
self.defineEditMenuEditBodyTable()
self.defineEditMenuEditHeadlineTable()
self.defineEditMenuFindMenuTable()
self.defineEditMenuTop2Table()
#@+node:ekr.20031218072017.839: *6* defineEditMenuTopTable
def defineEditMenuTopTable(self):
self.editMenuTopTable = [
# &: u,r reserved for undo/redo: a,d,p,t,y.
# & (later): e,g,n,v.
("Can't Undo", 'undo'),
("Can't Redo", 'redo'),
'-',
('Cu&t', 'cut-text'),
('Cop&y', 'copy-text'),
('&Paste', 'paste-text'),
('&Delete', 'backward-delete-char'),
('Select &All', 'select-all'),
'-',
]
#@+node:ekr.20031218072017.3754: *6* defineEditMenuEditBodyTable
def defineEditMenuEditBodyTable(self):
self.editMenuEditBodyTable = [
# Shortcuts a,b,d,e,i,l,m,n,r,s,t,u
            '*extract-&section',
'*extract-&names',
'*&extract',
'-',
'*convert-all-b&lanks',
'*convert-all-t&abs',
'*convert-&blanks',
'*convert-&tabs',
'*insert-body-&time',
'*&reformat-paragraph',
'-',
'*&indent-region',
'*&unindent-region',
'*&match-brackets',
'*add-comments',
'*delete-comments',
]
#@+node:ekr.20031218072017.3755: *6* defineEditMenuEditHeadlineTable
def defineEditMenuEditHeadlineTable(self):
self.editMenuEditHeadlineTable = [
'*edit-&headline',
'*&end-edit-headline',
'*&abort-edit-headline',
'*insert-headline-&time',
'*toggle-&angle-brackets',
]
#@+node:ekr.20031218072017.3756: *6* defineEditMenuFindMenuTable
def defineEditMenuFindMenuTable(self):
self.editMenuFindMenuTable = [
# &: a,b,c,d,e,f,h,i,l,n,o,p,q,r,s,u,w,x
'*&open-find-tab',
'*&hide-find-tab',
'*search-&with-present-options',
'-',
'*find-&next',
'*find-&prev',
'*&change',
'*find-&all',
'*clone-fi&nd-all',
'*change-a&ll',
'-',
'*&find-character',
'*find-character-extend-&selection',
'*&backward-find-character',
'*backward-find-character-&extend-selection',
'-',
'*&isearch-forward',
'*isea&rch-backward',
'*isearch-forward-rege&xp',
'*isearch-backward-regex&p',
'-',
'*&query-replace',
'*q&uery-replace-regex',
]
#@+node:ekr.20031218072017.3757: *6* defineEditMenuTop2Table
def defineEditMenuTop2Table(self):
c = self.c
try:
show = c.frame.body.getColorizer().showInvisibles
except Exception:
show = False
label = "Hide In&visibles" if show else "Show In&visibles"
self.editMenuTop2Table = [
'*&goto-global-line',
'*&execute-script',
(label, 'toggle-invisibles'),
("Setti&ngs", 'open-leoSettings-leo'),
]
# Top-level shortcuts earlier: a,d,p,t,u,y,z
# Top-level shortcuts here: e,g,n,v
#@+node:ekr.20031218072017.3758: *5* defineFileMenuTables & helpers
def defineFileMenuTables(self):
self.defineFileMenuTopTable()
self.defineFileMenuTop2Table()
self.defineFileMenuReadWriteMenuTable()
self.defineFileMenuTangleMenuTable()
self.defineFileMenuUntangleMenuTable()
self.defineFileMenuImportMenuTable()
self.defineFileMenuExportMenuTable()
self.defineFileMenuTop3MenuTable()
#@+node:ekr.20031218072017.3759: *6* defineFileMenuTopTable
def defineFileMenuTopTable(self):
self.fileMenuTopTable = [
'*&new',
('&Open...', 'open-outline'),
]
#@+node:ekr.20031218072017.3760: *6* defineFileMenuTop2Table
def defineFileMenuTop2Table(self):
self.fileMenuTop2Table = [
'-',
('&Close', 'close-window'),
('&Save', 'save-file'),
('Save &As', 'save-file-as'),
('Save As &Unzipped', 'save-file-as-unzipped'),
('Save As &Zipped', 'save-file-as-zipped'),
('Save &To', 'save-file-to'),
('Re&vert To Saved', 'revert'),
]
#@+node:ekr.20031218072017.3761: *6* defineFileMenuReadWriteMenuTable
def defineFileMenuReadWriteMenuTable(self):
self.fileMenuReadWriteMenuTable = [
'*&check-derived-file',
'*check-leo-file',
'-',
'*&read-outline-only',
'*write-&outline-only',
'-',
'*read-&file-into-node',
'*writ&e-file-from-node',
'-',
('Read @&auto Nodes', 'read-at-auto-nodes'),
('Write @a&uto Nodes', 'write-at-auto-nodes'),
('Write D&irty @a&uto Nodes', 'write-dirty-at-auto-nodes'),
'-',
('Read @file &Nodes', 'read-at-file-nodes'),
('Write &Dirty @file Nodes', 'write-dirty-at-file-nodes'),
('Write &Missing @file Nodes', 'write-missing-at-file-nodes'),
('&Write @file Nodes', 'write-at-file-nodes'),
]
# a,cd,e,f,i,l,m,n,o,r,u,w
#@+node:ekr.20031218072017.3762: *6* defineFileMenuTangleMenuTable
def defineFileMenuTangleMenuTable(self):
self.fileMenuTangleMenuTable = [
'*tangle-&all',
'*tangle-&marked',
'*&tangle',
]
#@+node:ekr.20031218072017.3763: *6* defineFileMenuUntangleMenuTable
def defineFileMenuUntangleMenuTable(self):
self.fileMenuUntangleMenuTable = [
'*untangle-&all',
'*untangle-&marked',
'*&untangle',
]
#@+node:ekr.20031218072017.3764: *6* defineFileMenuImportMenuTable
def defineFileMenuImportMenuTable(self):
self.fileMenuImportMenuTable = [
#&: c,d,f,n,o,r,
'*import-&derived-file',
('Import To @&file', 'import-at-file'),
('Import To @&root', 'import-at-root'),
'*import-&cweb-files',
'*import-&noweb-files',
'*import-flattened-&outline',
]
#@+node:ekr.20031218072017.3765: *6* defineFileMenuExportMenuTable
def defineFileMenuExportMenuTable(self):
self.fileMenuExportMenuTable = [
'*export-&headlines',
'*outline-to-&cweb',
'*outline-to-&noweb',
'*&flatten-outline',
'*&remove-sentinels',
'*&weave',
]
#@+node:ekr.20031218072017.3766: *6* defineFileMenuTop3MenuTable
def defineFileMenuTop3MenuTable(self):
self.fileMenuTop3MenuTable = [
('E&xit', 'exit-leo'),
]
#@+node:ekr.20031218072017.3767: *5* defineOutlineMenuTables & helpers
def defineOutlineMenuTables(self):
self.defineOutlineMenuTopMenuTable()
self.defineOutlineMenuCheckOutlineMenuTable()
self.defineOutlineMenuExpandContractMenuTable()
self.defineOutlineMenuMoveMenuTable()
self.defineOutlineMenuMarkMenuTable()
self.defineOutlineMenuGoToMenuTable()
#@+node:ekr.20031218072017.3768: *6* defineOutlineMenuTopMenuTable
def defineOutlineMenuTopMenuTable(self):
self.outlineMenuTopMenuTable = [
'*c&ut-node',
'*c&opy-node',
'*&paste-node',
('Pas&te Node As Clone', 'paste-retaining-clones'),
'*&delete-node',
'-',
'*&insert-node',
'*&clone-node',
'*sort-childre&n',
'*&sort-siblings',
'-',
'*&hoist',
('D&e-Hoist', 'de-hoist'), # To preserve the '-' in De-Hoist.
'-',
]
# Ampersand bindings: a,b,c,d,e,h,i,n,o,p,t,s,y
# Bindings for entries that go to submenus: a,g,k,m,x
#@+node:ekr.20040711140738: *6* defineOutlineMenuCheckOutlineMenuTable
def defineOutlineMenuCheckOutlineMenuTable(self):
self.outlineMenuCheckOutlineMenuTable = [
# &: a,c,d,l,o
'*check-&outline',
'*&dump-outline',
'-',
'*compare-&leo-files',
'-',
'*check-&all-python-code',
'*&check-python-code',
]
#@+node:ekr.20031218072017.3769: *6* defineOutlineMenuExpandContractMenuTable
def defineOutlineMenuExpandContractMenuTable(self):
self.outlineMenuExpandContractMenuTable = [
'*&contract-all',
'*contract-&node',
'*contract-&parent',
'*contract-or-go-&left',
'-',
'*expand-p&rev-level',
'*expand-n&ext-level',
'*expand-and-go-right',
'*expand-or-go-right',
'-',
'*expand-to-level-&1',
'*expand-to-level-&2',
'*expand-to-level-&3',
'*expand-to-level-&4',
'*expand-to-level-&5',
'*expand-to-level-&6',
'*expand-to-level-&7',
'*expand-to-level-&8',
'-',
'*expand-&all',
'*expand-n&ode',
]
#@+node:ekr.20031218072017.3770: *6* defineOutlineMenuMoveMenuTable
def defineOutlineMenuMoveMenuTable(self):
self.outlineMenuMoveMenuTable = [
('Move &Down', 'move-outline-down'),
('Move &Left', 'move-outline-left'),
('Move &Right', 'move-outline-right'),
('Move &Up', 'move-outline-up'),
'-',
'*&promote',
'*&demote',
]
#@+node:ekr.20031218072017.3771: *6* defineOutlineMenuMarkMenuTable
def defineOutlineMenuMarkMenuTable(self):
self.outlineMenuMarkMenuTable = [
'*&mark',
'*mark-&subheads',
'*mark-changed-&items',
# '*mark-changed-&roots',
'*mark-&clones',
'*&unmark-all',
]
#@+node:ekr.20031218072017.3772: *6* defineOutlineMenuGoToMenuTable
def defineOutlineMenuGoToMenuTable(self):
self.outlineMenuGoToMenuTable = [
# &: a,b,c,d,e,f,g,h,i,l,m,n,o,p,r,s,t,v,
('Go To &First Node', 'goto-first-node'),
('Go To First V&isible', 'goto-first-visible-node'),
('Go To First Si&bling', 'goto-first-sibling'),
'-',
('Go To Next C&hanged', 'goto-next-changed'),
('Go To Next &Clone', 'goto-next-clone'),
('Go To Next &Marked', 'goto-next-marked'),
('Go To Next N&ode', 'goto-next-node'),
('Go To Next &Sibling', 'goto-next-sibling'),
('Go To Next Visibl&e', 'goto-next-visible'),
('Go To Next Visite&d', 'go-forward'),
'-',
('Go To P&arent', 'goto-parent'),
'-',
('Go To &Prev Node', 'goto-prev-node'),
('Go To P&rev Sibling', 'goto-prev-sibling'),
('Go To Pre&v Visible', 'goto-prev-visible'),
('Go To Prev Visi&ted', 'go-back'),
'-',
('Go To Last Node', 'goto-last-node'),
('Go To Last Siblin&g', 'goto-last-sibling'),
('Go To &Last Visible', 'goto-last-visible-node'),
]
#@+node:ekr.20050921103230: *5* defineCmdsMenuTables & helpers
def defineCmdsMenuTables(self):
self.defineCmdsMenuAbbrevTable()
self.defineCmdsMenuBodyEditorsTable()
self.defineCmdsMenuBuffersTable()
self.defineCmdsMenuChaptersTable()
self.defineCmdsMenuCursorTable()
self.defineCmdsMenuFocusTable()
self.defineCmdsMenuMacroTable()
self.defineCmdsMenuMinibufferTable()
self.defineCmdsMenuPickersTable()
self.defineCmdsMenuRectanglesTable()
self.defineCmdsMenuRegistersTable()
self.defineCmdsMenuRunTable()
self.defineCmdsMenuScrollTable()
self.defineCmdsMenuSpellCheckTable()
self.defineCmdsMenuTextTable()
self.defineCmdsMenuToggleTable()
#@+node:ekr.20060117094955.1: *6* defineCmdsMenuAbbrevTable
def defineCmdsMenuAbbrevTable(self):
self.cmdsMenuAbbrevTable = [
# &: a,e,i,k,l,r,w,v
'abbre&v-mode',
'-',
'&list-abbrevs',
'&read-abbrev-file',
'&write-abbrev-file',
'-',
'&add-global-abbrev',
'&inverse-add-global-abbrev',
'&kill-all-abbrevs',
# '-',
# 'expand-abbrev', # Not a command
# '&expand-region-abbrevs',
]
#@+node:ekr.20060912093104: *6* defineCmdsMenuBodyEditorsTable
def defineCmdsMenuBodyEditorsTable(self):
self.cmdsMenuBodyEditorsTable = [
# &: a,c,d
'&add-editor',
'&cycle-editor-focus',
'&delete-editor',
]
#@+node:ekr.20060117095212: *6* defineCmdsMenuBufferTable
def defineCmdsMenuBuffersTable(self):
self.cmdsMenuBuffersTable = [
'&append-to-buffer',
'&kill-buffer',
'list-&buffers',
'&list-buffers-alphabetically',
'&prepend-to-buffer',
'&rename-buffer',
'&switch-to-buffer',
]
#@+node:ekr.20070604205927: *6* defineCmdsMenuChaptersTable
def defineCmdsMenuChaptersTable(self):
self.cmdsMenuChaptersTable = [
'&clone-node-to-chapter',
'c&opy-node-to-chapter',
'c&reate-chapter',
'&move-node-to-chapter',
'&remove-chapter',
'&select-chapter',
]
#@+node:ekr.20060924124119: *6* defineCmdsMenuCursorTable
def defineCmdsMenuCursorTable(self):
self.cursorMenuBackTable = [
# &: b,c,l,p,s,v,w
'back-&char',
            'back-&paragraph',
'back-&sentence',
'back-&word',
'-',
'beginning-of-&buffer',
'beginning-of-&line',
'-',
'pre&vious-line',
]
        self.cursorMenuBackExtendTable = [
# &: b,c,l,p,s,v,w
'back-&char-extend-selection',
            'back-&paragraph-extend-selection',
'back-&sentence-extend-selection',
'back-&word-extend-selection',
'-',
'beginning-of-&buffer-extend-selection',
'beginning-of-&line-extend-selection',
'-',
'pre&vious-line-extend-selection',
]
self.cursorMenuExtendTable = [
# &: l,p,s,w
'extend-to-&line',
            'extend-to-&paragraph',
'extend-to-&sentence',
'extend-to-&word',
]
self.cursorMenuForwardTable = [
# &: b,c,e,l,n,p,s,w
'end-of-&buffer',
'end-of-&line',
'-',
'forward-&char',
            'forward-&paragraph',
'forward-&sentence',
'forward-&end-word',
'forward-&word',
'-',
'&next-line',
]
self.cursorMenuForwardExtendTable = [
# &: b,c,e,l,n,p,s,w
'end-of-&buffer-extend-selection',
'end-of-&line-extend-selection',
'-',
'forward-&char-extend-selection',
            'forward-&paragraph-extend-selection',
'forward-&sentence-extend-selection',
'forward-&end-word-extend-selection',
            'forward-&word-extend-selection',
'-',
'&next-line-extend-selection',
]
#@+node:ekr.20060923060822: *6* defineCmdsMenuFocusTable
def defineCmdsMenuFocusTable(self):
self.cmdsMenuFocusTable = [
'&cycle-all-focus',
'focus-to-&body',
'focus-to-&log',
'focus-to-&minibuffer',
'focus-to-&tree',
]
#@+node:ekr.20060117114315: *6* defineCmdsMenuMacroTable
def defineCmdsMenuMacroTable(self):
self.cmdsMenuMacroTable = [
'&load-file',
'-',
'&start-kbd-macro',
# '&end-kbd-macro',
'&name-last-kbd-macro',
'-',
'&call-last-kbd-macro',
# '&insert-kbd-macro',
]
#@+node:ekr.20061011084101.1: *6* defineCmdsMenuMinibufferTable
def defineCmdsMenuMinibufferTable(self):
self.cmdsMenuMinibufferTable = [
# &: f,h,i,q,r,s,v
'&full-command',
'keyboard-&quit',
'&repeat-complex-command',
'&view-lossage',
'-',
'&show-mini-buffer',
'h&ide-mini-buffer',
'-',
'&help-for-minibuffer',
]
#@+node:ekr.20061011085641: *6* defineCmdsMenuPickersTable
def defineCmdsMenuPickersTable(self):
self.cmdsMenuPickersTable = [
'show-&colors',
'show-find-&options',
'show-&fonts',
]
#@+node:ekr.20060117095212.2: *6* defineCmdsMenuRectanglesTable
def defineCmdsMenuRectanglesTable(self):
self.cmdsMenuRectanglesTable = [
'&clear-rectangle',
'c&lose-rectangle',
'&delete-rectangle',
'&kill-rectangle',
'&open-rectangle',
'&string-rectangle',
'&yank-rectangle',
]
#@+node:ekr.20060117095212.1: *6* defineCmdsMenuRegistersTable
def defineCmdsMenuRegistersTable(self):
self.cmdsMenuRegistersTable = [
# &: a,c,e,i,j,n,p,r,v
'&append-to-register',
'copy-r&ectangle-to-register',
            '&copy-to-register',
'i&ncrement-register',
'&insert-register',
'&jump-to-register',
# 'number-to-register',
'&point-to-register',
'p&repend-to-register',
'&view-register',
]
#@+node:ekr.20061119061958: *6* defineCmdsMenuRunTable
def defineCmdsMenuRunTable(self):
self.cmdsMenuRunTable = [
# &: e,r
'&execute-script',
'&run-unit-tests',
]
#@+node:ekr.20060923060822.1: *6* defineCmdsMenuScrollTable
def defineCmdsMenuScrollTable(self):
self.cmdsMenuScrollTable = [
# &: c,d,e,f,l,o,p,r,v,x
'scroll-outline-down-&line',
'scroll-outline-down-&page',
'scroll-outline-le&ft',
'scroll-outline-&right',
's&croll-outline-up-line',
'scr&oll-outline-up-page',
'-',
'scroll-down-half-page',
'scroll-down-line',
'scroll-&down-page',
'scroll-up-half-page',
'scroll-up-line',
'scroll-&up-page',
]
#@+node:ekr.20060117095212.7: *6* defineCmdsMenuSpellCheckTable
def defineCmdsMenuSpellCheckTable(self):
self.cmdsMenuSpellCheckTable = [
'&open-spell-tab',
'spell-&change',
'spell-change-&then-find',
'spell-&find',
'spell-&ignore',
]
#@+node:ekr.20060924161901: *6* defineCmdsMenuTextTable
def defineCmdsMenuTextTable(self):
self.cmdsMenuTextTable = [
# &: a,b,c,d,e,f,g,i,l,m,n,o,p,r,s,u
'&beautify',
'beautify-&all',
'-',
'center-&line',
            'center-&region',
'-',
'&capitalize-word',
'&downcase-word',
'&upcase-word',
'-',
'd&owncase-region',
'u&pcase-region',
'-',
'&indent-region',
'indent-r&elative',
'indent-ri&gidly',
'u&nindent-region',
'-',
'sort-colu&mns',
'sort-&fields',
'&sort-lines',
]
#@+node:ekr.20060926161940: *6* defineCmdsMenuToggleTable
def defineCmdsMenuToggleTable(self):
self.cmdsMenuToggleTable = [
# &: d,e,m,s,t,u,v
'toggle-a&utocompleter',
'toggle-call&tips',
'toggle-&extend-mode',
'toggle-input-&state',
'toggle-in&visibles',
'toggle-&mini-buffer',
'toggle-split-&direction',
'-',
# &: a,b,c,f,h,i,r,w,x
'toggle-find-&ignore-case-option',
'toggle-find-in-&body-option',
'toggle-find-in-&headline-option',
'toggle-find-mark-&changes-option',
'toggle-find-mark-&finds-option',
'toggle-find-rege&x-option',
'toggle-find-&reverse-option',
'toggle-find-&word-option',
'toggle-find-wrap-&around-option',
]
#@+node:ekr.20031218072017.3773: *5* defineWindowMenuTables
def defineWindowMenuTables(self):
self.windowMenuTopTable = [
# &: a,b,c,d,e,f,l,m,n,o,p,r,s,t,u,w,x,y
'*&equal-sized-panes',
'*&toggle-active-pane',
'*toggle-&split-direction',
'-',
'*contract-&body-pane',
'*contract-&log-pane',
'*contract-&outline-pane',
'*contract-&pane',
'-',
'*expand-bo&dy-pane',
'*expand-lo&g-pane',
'*expand-o&utline-pane',
'*expand-pa&ne',
'-',
'*&fully-expand-body-pane',
'*full&y-expand-log-pane',
'*fully-e&xpand-outline-pane',
'*fully-exp&and-pane',
'-',
'*&resize-to-screen',
'*&cascade-windows',
'*&minimize-all',
'-',
'*open-compare-window',
'*open-python-&window',
]
#@+node:ekr.20031218072017.3774: *5* defineHelpMenuTables
def defineHelpMenuTables(self):
self.helpMenuTable = [
# &: a,b,c,d,e,f,h,l,m,n,o,p,r,s,t,u
('&About Leo...', 'about-leo'),
('Online &Home Page', 'open-online-home'),
# '*open-online-&tutorial',
'*open-&users-guide',
'-',
('Open Leo&Docs.leo', 'open-leoDocs-leo'),
('Open Leo&Plugins.leo', 'open-leoPlugins-leo'),
('Open Leo&Settings.leo', 'open-leoSettings-leo'),
('Open &myLeoSettings.leo', 'open-myLeoSettings-leo'),
('Open scr&ipts.leo', 'open-scripts-leo'),
'-',
'*he&lp-for-minibuffer',
'*help-for-&command',
'-',
'*&apropos-autocompletion',
'*apropos-&bindings',
'*apropos-&debugging-commands',
'*apropos-&find-commands',
'-',
'*pri&nt-bindings',
'*print-c&ommands',
]
#@+node:ekr.20031218072017.3780: *4* hasSelection
    # Return True if text is selected in the outline or body pane.
def hasSelection(self):
c = self.c; w = c.frame.body.wrapper
if c.frame.body:
first, last = w.getSelectionRange()
return first != last
else:
return False
#@+node:ekr.20051022053758.1: *3* Helpers
    #@+node:ekr.20031218072017.3783: *4* canonicalizeMenuName & canonicalizeTranslatedMenuName
def canonicalizeMenuName(self, name):
return ''.join([ch for ch in name.lower() if ch.isalnum()])
def canonicalizeTranslatedMenuName(self, name):
return ''.join([ch for ch in name.lower() if ch not in '& \t\n\r'])
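    # For example (illustrative): canonicalizeMenuName('Open &With...')
    # returns 'openwith', while canonicalizeTranslatedMenuName('Open &With...')
    # keeps the dots and returns 'openwith...'.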
#@+node:ekr.20051022044950: *4* computeOldStyleShortcutKey
def computeOldStyleShortcutKey(self, s):
'''Compute the old-style shortcut key for @shortcuts entries.'''
return ''.join([ch for ch in s.strip().lower() if ch.isalnum()])
#@+node:ekr.20031218072017.1723: *4* createMenuEntries (LeoMenu) & helpers
def createMenuEntries(self, menu, table, dynamicMenu=False):
'''Create a menu entry from the table.
New in 4.4: this method shows the shortcut in the menu,
but this method **never** binds any shortcuts.'''
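        # Each table entry takes one of these forms (illustrative, per
        # getMenuEntryInfo below):
        #     '-'                    a menu separator
        #     '*&some-command'       a minibuffer command name; the leading '*'
        #                            removes hyphens from the generated label
        #     ('&Label', 'command')  an explicit label/command pair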
c = self.c
if g.app.unitTesting: return
if not menu: return
self.traceMenuTable(table)
for data in table:
label, command, done = self.getMenuEntryInfo(data, menu)
if done: continue
commandName = self.getMenuEntryBindings(command, dynamicMenu, label)
if not commandName: continue
masterMenuCallback = self.createMasterMenuCallback(
dynamicMenu, command, commandName)
realLabel = self.getRealMenuName(label)
amp_index = realLabel.find("&")
realLabel = realLabel.replace("&", "")
# c.add_command ensures that c.outerUpdate is called.
c.add_command(menu, label=realLabel,
accelerator='', # The accelerator is now computed dynamically.
command=masterMenuCallback,
commandName=commandName,
underline=amp_index)
#@+node:ekr.20111102072143.10016: *5* createMasterMenuCallback
def createMasterMenuCallback(self, dynamicMenu, command, commandName):
trace = False and not g.unitTesting
c = self.c
def setWidget():
w = c.frame.getFocus()
if w and sys.platform.startswith('darwin'):
# 2012/01/11: redirect (MacOS only).
wname = c.widget_name(w)
if wname.startswith('head'):
w = c.frame.tree.edit_widget(c.p)
# 2015/05/14: return a wrapper if possible.
if not g.isTextWrapper(w):
w = getattr(w, 'wrapper', w)
return w
if dynamicMenu:
if command:
def masterDynamicMenuCallback(c=c, command=command):
# 2012/01/07: set w here.
w = setWidget()
if trace: g.trace(command.__name__, w)
event = g.app.gui.create_key_event(c, None, None, w)
return c.k.masterCommand(func=command, event=event)
return masterDynamicMenuCallback
else:
g.internalError('no callback for dynamic menu item.')
def dummyMasterMenuCallback():
pass
return dummyMasterMenuCallback
else:
def masterStaticMenuCallback(c=c, commandName=commandName):
# 2011/10/28: Use only the command name to dispatch the command.
# 2012/01/07: Bug fix: set w here.
w = setWidget()
if trace: g.trace(commandName, w, c)
event = g.app.gui.create_key_event(c, None, None, w)
return c.k.masterCommand(commandName=commandName, event=event)
return masterStaticMenuCallback
#@+node:ekr.20111028060955.16568: *5* getMenuEntryBindings
def getMenuEntryBindings(self, command, dynamicMenu, label):
'''Compute commandName from command.'''
trace = False and not g.unitTesting
c = self.c
if g.isString(command):
# Command is really a command name.
commandName = command
else:
# First, get the old-style name.
commandName = self.computeOldStyleShortcutKey(label)
command = c.commandsDict.get(commandName)
if trace and not command and not dynamicMenu:
# This may come from a plugin that normally isn't enabled.
g.trace('No inverse for %s' % commandName)
return commandName
#@+node:ekr.20111028060955.16565: *5* getMenuEntryInfo
def getMenuEntryInfo(self, data, menu):
done = False
if g.isString(data):
# A single string is both the label and the command.
s = data
removeHyphens = s and s[0] == '*'
if removeHyphens: s = s[1:]
label = self.capitalizeMinibufferMenuName(s, removeHyphens)
command = s.replace('&', '').lower()
if label == '-':
self.add_separator(menu)
done = True # That's all.
else:
ok = isinstance(data, (list, tuple)) and len(data) in (2, 3)
if ok:
if len(data) == 2:
# Command can be a minibuffer-command name.
label, command = data
else:
# Ignore shortcuts bound in menu tables.
label, junk, command = data
if label in (None, '-'):
self.add_separator(menu)
done = True # That's all.
else:
g.trace('bad data in menu table: %s' % repr(data))
done = True # Ignore bad data
return label, command, done
#@+node:ekr.20111028060955.16563: *5* traceMenuTable
def traceMenuTable(self, table):
trace = False and not g.unitTesting
if not trace: return
format = '%40s %s'
g.trace('*' * 40, g.callers())
for data in table:
if isinstance(data, (list, tuple)):
n = len(data)
if n == 2:
print(format % (data[0], data[1]))
elif n == 3:
name, junk, func = data
print(format % (name, func and func.__name__ or '<NO FUNC>'))
else:
print(format % (data, ''))
#@+node:ekr.20031218072017.3784: *4* createMenuItemsFromTable
def createMenuItemsFromTable(self, menuName, table, dynamicMenu=False):
trace = False
if g.app.gui.isNullGui:
return
try:
menu = self.getMenu(menuName)
if menu is None:
if trace and not g.app.menuWarningsGiven: ### and not g.app.gui.isNullGui:
g.es_print(g.app.gui.guiName(), g.callers())
g.es_print("menu does not exist: %s" % (menuName))
return
self.createMenuEntries(menu, table, dynamicMenu=dynamicMenu)
except Exception:
g.es_print("exception creating items for", menuName, "menu")
g.es_exception()
g.app.menuWarningsGiven = True
#@+node:ekr.20031218072017.3804: *4* createNewMenu
def createNewMenu(self, menuName, parentName="top", before=None):
try:
parent = self.getMenu(parentName) # parent may be None.
menu = self.getMenu(menuName)
if menu:
# Not an error.
# g.error("menu already exists:", menuName)
return None # Fix #528.
else:
menu = self.new_menu(parent, tearoff=0, label=menuName)
self.setMenu(menuName, menu)
label = self.getRealMenuName(menuName)
amp_index = label.find("&")
label = label.replace("&", "")
if before: # Insert the menu before the "before" menu.
index_label = self.getRealMenuName(before)
amp_index = index_label.find("&")
index_label = index_label.replace("&", "")
index = parent.index(index_label)
self.insert_cascade(parent, index=index, label=label, menu=menu, underline=amp_index)
else:
self.add_cascade(parent, label=label, menu=menu, underline=amp_index)
return menu
except Exception:
g.es("exception creating", menuName, "menu")
g.es_exception()
return None
#@+node:ekr.20031218072017.4116: *4* createOpenWithMenuFromTable & helpers (LeoMenu)
def createOpenWithMenuFromTable(self, table):
'''
Table is a list of dictionaries, created from @openwith settings nodes.
This menu code uses these keys:
'name': menu label.
'shortcut': optional menu shortcut.
efc.open_temp_file uses these keys:
'args': the command-line arguments to be used to open the file.
'ext': the file extension.
'kind': the method used to open the file, such as subprocess.Popen.
'''
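        # A hypothetical table entry, for illustration only:
        #     {'name': '&Emacs', 'shortcut': None,
        #      'args': ['emacsclient', '--no-wait'],
        #      'ext': '.txt', 'kind': 'subprocess.Popen'}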
# trace = False and not g.unitTesting
k = self.c.k
if not table: return
g.app.openWithTable = table # Override any previous table.
# Delete the previous entry.
parent = self.getMenu("File")
# if trace: g.trace('parent',parent)
if not parent:
if not g.app.batchMode:
g.error('', 'createOpenWithMenuFromTable:', 'no File menu')
return
label = self.getRealMenuName("Open &With...")
amp_index = label.find("&")
label = label.replace("&", "")
try:
index = parent.index(label)
parent.delete(index)
except Exception:
try:
index = parent.index("Open With...")
parent.delete(index)
except Exception:
g.trace('unexpected exception')
g.es_exception()
return
# Create the Open With menu.
openWithMenu = self.createOpenWithMenu(parent, label, index, amp_index)
if not openWithMenu:
g.trace('openWithMenu returns None')
return
self.setMenu("Open With...", openWithMenu)
        # Create the menu items of the Open With menu.
self.createOpenWithMenuItemsFromTable(openWithMenu, table)
for d in table:
k.bindOpenWith(d)
#@+node:ekr.20051022043608.1: *5* createOpenWithMenuItemsFromTable & callback (LeoMenu)
def createOpenWithMenuItemsFromTable(self, menu, table):
'''
Create an entry in the Open with Menu from the table, a list of dictionaries.
Each dictionary d has the following keys:
'args': the command-line arguments used to open the file.
'ext': not used here: used by efc.open_temp_file.
'kind': not used here: used by efc.open_temp_file.
'name': menu label.
'shortcut': optional menu shortcut.
'''
trace = False and not g.unitTesting
c = self.c
if g.app.unitTesting: return
for d in table:
label = d.get('name')
args = d.get('args', [])
if trace:
print()
for key in sorted(list(d.keys())):
print('%15s %s' % (key, d.get(key)))
accel = d.get('shortcut') or ''
if label and args:
realLabel = self.getRealMenuName(label)
underline = realLabel.find("&")
realLabel = realLabel.replace("&", "")
callback = self.defineOpenWithMenuCallback(d)
c.add_command(menu,
label=realLabel,
accelerator=accel,
command=callback,
underline=underline)
#@+node:ekr.20031218072017.4118: *6* defineOpenWithMenuCallback (LeoMenu)
def defineOpenWithMenuCallback(self, d):
# The first parameter must be event, and it must default to None.
def openWithMenuCallback(event=None, self=self, d=d):
return self.c.openWith(d=d)
return openWithMenuCallback
#@+node:tbrown.20080509212202.7: *4* deleteRecentFilesMenuItems (LeoMenu)
def deleteRecentFilesMenuItems(self, menu):
"""Delete recent file menu entries"""
rf = g.app.recentFilesManager
# Why not just delete all the entries?
recentFiles = rf.getRecentFiles()
toDrop = len(recentFiles) + len(rf.getRecentFilesTable())
self.delete_range(menu, 0, toDrop)
for i in rf.groupedMenus:
menu = self.getMenu(i)
if menu:
self.destroy(menu)
self.destroyMenu(i)
#@+node:ekr.20031218072017.4117: *4* defineMenuCallback
def defineMenuCallback(self, command, name, minibufferCommand):
c = self.c
if minibufferCommand:
# Create a dummy event as a signal to doCommand.
event = g.app.gui.create_key_event(c, None, None, None)
# The first parameter must be event, and it must default to None.
def minibufferMenuCallback(event=event, self=self, command=command, label=name):
c = self.c
return c.doCommand(command, label, event)
return minibufferMenuCallback
else:
# The first parameter must be event, and it must default to None.
def legacyMenuCallback(event=None, self=self, command=command, label=name):
c = self.c # 2012/03/04.
c.check_event(event)
return c.doCommand(command, label)
return legacyMenuCallback
#@+node:ekr.20031218072017.3805: *4* deleteMenu (LeoMenu)
def deleteMenu(self, menuName):
try:
menu = self.getMenu(menuName)
if menu:
self.destroy(menu)
self.destroyMenu(menuName)
else:
g.es("can't delete menu:", menuName)
except Exception:
g.es("exception deleting", menuName, "menu")
g.es_exception()
#@+node:ekr.20031218072017.3806: *4* deleteMenuItem
def deleteMenuItem(self, itemName, menuName="top"):
"""Delete itemName from the menu whose name is menuName."""
try:
menu = self.getMenu(menuName)
if menu:
realItemName = self.getRealMenuName(itemName)
self.delete(menu, realItemName)
else:
g.es("menu not found:", menuName)
except Exception:
g.es("exception deleting", itemName, "from", menuName, "menu")
g.es_exception()
#@+node:ekr.20031218072017.3782: *4* get/setRealMenuName & setRealMenuNamesFromTable
# Returns the translation of a menu name or an item name.
def getRealMenuName(self, menuName):
cmn = self.canonicalizeTranslatedMenuName(menuName)
return g.app.realMenuNameDict.get(cmn, menuName)
def setRealMenuName(self, untrans, trans):
cmn = self.canonicalizeTranslatedMenuName(untrans)
g.app.realMenuNameDict[cmn] = trans
def setRealMenuNamesFromTable(self, table):
try:
for untrans, trans in table:
self.setRealMenuName(untrans, trans)
except Exception:
g.es("exception in", "setRealMenuNamesFromTable")
g.es_exception()
#@+node:ekr.20031218072017.3807: *4* getMenu, setMenu, destroyMenu (LeoMenu)
def getMenu(self, menuName):
cmn = self.canonicalizeMenuName(menuName)
# if cmn == 'openwith': g.trace('LeoMenu',g.dictToString(self.menus))
return self.menus.get(cmn)
def setMenu(self, menuName, menu):
cmn = self.canonicalizeMenuName(menuName)
self.menus[cmn] = menu
def destroyMenu(self, menuName):
cmn = self.canonicalizeMenuName(menuName)
del self.menus[cmn]
#@+node:ekr.20031218072017.3808: *3* Must be overridden in menu subclasses
#@+node:ekr.20031218072017.3809: *4* 9 Routines with Tk spellings
def add_cascade(self, parent, label, menu, underline):
self.oops()
def add_command(self, menu, **keys):
self.oops()
def add_separator(self, menu):
self.oops()
# def bind (self,bind_shortcut,callback):
# self.oops()
def delete(self, menu, realItemName):
self.oops()
def delete_range(self, menu, n1, n2):
self.oops()
def destroy(self, menu):
self.oops()
def insert(self, menuName, position, label, command, underline=None): # New in Leo 4.4.3 a1
self.oops()
def insert_cascade(self, parent, index, label, menu, underline):
self.oops()
def new_menu(self, parent, tearoff=0, label=''): # 2010: added label arg for pylint.
self.oops(); return None
#@+node:ekr.20031218072017.3810: *4* 9 Routines with new spellings
def activateMenu(self, menuName): # New in Leo 4.4b2.
self.oops()
def clearAccel(self, menu, name):
self.oops()
def createMenuBar(self, frame):
self.oops()
def createOpenWithMenu(self, parent, label, index, amp_index):
self.oops(); return None
def disableMenu(self, menu, name):
self.oops()
def enableMenu(self, menu, name, val):
self.oops()
def getMacHelpMenu(self, table):
return None
def getMenuLabel(self, menu, name):
self.oops()
def setMenuLabel(self, menu, name, label, underline=-1):
self.oops()
#@-others
#@+node:ekr.20031218072017.3811: ** class NullMenu
class NullMenu(LeoMenu):
"""A null menu class for testing and batch execution."""
#@+others
#@+node:ekr.20050104094308: *3* ctor (NullMenu)
def __init__(self, frame):
# Init the base class.
LeoMenu.__init__(self, frame)
self.isNull = True
#@+node:ekr.20050104094029: *3* oops
def oops(self):
# g.trace("LeoMenu", g.callers())
pass
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 70
#@-leo
# --- examples/example_wldeblend.py (LSSTDESC/descwl_shear_sims, BSD-3-Clause) ---
"""
example with WeakLensingDeblending galaxies and power spectrum psf
"""
import os
import numpy as np
import lsst.afw.image as afw_image
import lsst.afw.geom as afw_geom
from descwl_shear_sims.galaxies import make_galaxy_catalog
from descwl_shear_sims.psfs import make_ps_psf
from descwl_shear_sims.sim import make_sim, get_se_dim
def go():
if "CATSIM_DIR" not in os.environ:
        # this contains the galaxy and star catalogs for generating
        # WeakLensingDeblending galaxies and stars
        print('you need CATSIM_DIR defined to run this example')
        return
seed = 761
rng = np.random.RandomState(seed)
dither = True
rotate = True
coadd_dim = 351
psf_dim = 51
bands = ['r', 'i']
# this makes WeakLensingDeblending galaxies
galaxy_catalog = make_galaxy_catalog(
rng=rng,
gal_type='wldeblend',
coadd_dim=coadd_dim,
buff=30,
)
# power spectrum psf
se_dim = get_se_dim(coadd_dim=coadd_dim, rotate=rotate, dither=dither)
psf = make_ps_psf(rng=rng, dim=se_dim)
    # generate simulated data; see below for what's in this dict
data = make_sim(
rng=rng,
galaxy_catalog=galaxy_catalog,
coadd_dim=coadd_dim,
psf_dim=psf_dim,
bands=bands,
g1=0.02,
g2=0.00,
psf=psf,
dither=dither,
rotate=rotate,
)
# data is a dict with the following keys.
# band_data: a dict, keyed by band name, with values that are a list of
# exps
# coadd_wcs: is a DM wcs for use in coadding
# psf_dims: is the psf dim we sent in (psf_dim, psf_dim)
# coadd_bbox: is an lsst Box2I, for use in coadding
# bright_info: is a structured array with position and mask info for bright
# objects
for key in ['band_data', 'coadd_wcs', 'psf_dims', 'coadd_bbox', 'bright_info']:
assert key in data
for band in bands:
assert band in data['band_data']
assert isinstance(data['band_data'][band][0], afw_image.ExposureF)
assert isinstance(data['coadd_wcs'], afw_geom.SkyWcs)
assert data['psf_dims'] == (psf_dim, )*2
extent = data['coadd_bbox'].getDimensions()
edims = (extent.getX(), extent.getY())
assert edims == (coadd_dim, )*2
# we should have no bright objects
assert data['bright_info'].size == 0
if __name__ == '__main__':
go()
# --- setup.py (nordic-institute/X-Road-Security-Server-toolkit, MIT) ---
"""Project setup"""
from setuptools import setup, find_packages
from xrdsst.core.version import get_version
VERSION = get_version()
f = open('README.md', 'r')
LONG_DESCRIPTION = f.read()
f.close()
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='xrdsst',
version=VERSION,
python_requires='>=3.6',
description='A toolkit for configuring X-Road Security Server',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
classifiers=[],
install_requires=required,
author='Finnish Digital Agency',
author_email='info@dvv.fi',
url='https://github.com/nordic-institute/X-Road-Security-Server-toolkit',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'tests*']),
package_data={'xrdsst': ['templates/*']},
setup_requires=['pytest-runner', 'pytest-pylint'],
tests_require=['pytest', 'pylint'],
include_package_data=True,
entry_points="""
[console_scripts]
xrdsst = xrdsst.main:main
""",
)
# --- representation_batch_rl/batch_rl/sac_v1.py (xxdreck/google-research, Apache-2.0) ---
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Implementation of DDPG."""
import typing
from dm_env import specs as dm_env_specs
import tensorflow as tf
from tf_agents.specs.tensor_spec import TensorSpec
from representation_batch_rl.batch_rl import critic
from representation_batch_rl.batch_rl import policies
class SAC(object):
"""Class performing SAC training."""
def __init__(self,
observation_spec,
action_spec,
actor_lr = 3e-4,
critic_lr = 3e-4,
alpha_lr = 3e-4,
discount = 0.99,
tau = 0.005,
target_update_period = 1,
target_entropy = 0.0,
use_soft_critic = False):
"""Creates networks.
Args:
observation_spec: environment observation spec.
action_spec: Action spec.
actor_lr: Actor learning rate.
critic_lr: Critic learning rate.
alpha_lr: Temperature learning rate.
discount: MDP discount.
tau: Soft target update parameter.
target_update_period: Target network update period.
target_entropy: Target entropy.
use_soft_critic: Whether to use soft critic representation.
"""
assert len(observation_spec.shape) == 1
state_dim = observation_spec.shape[0]
self.actor = policies.DiagGuassianPolicy(state_dim, action_spec)
self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)
self.log_alpha = tf.Variable(tf.math.log(0.1), trainable=True)
self.alpha_optimizer = tf.keras.optimizers.Adam(learning_rate=alpha_lr)
self.target_entropy = target_entropy
self.discount = discount
self.tau = tau
self.target_update_period = target_update_period
self.value = critic.CriticNet(state_dim)
self.value_target = critic.CriticNet(state_dim)
critic.soft_update(self.value, self.value_target, tau=1.0)
self.value_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)
if use_soft_critic:
self.critic = critic.SoftCritic(state_dim, action_spec)
else:
action_dim = action_spec.shape[0]
self.critic = critic.Critic(state_dim, action_dim)
self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)
def fit_value(self, states):
"""Updates critic parameters.
Args:
states: Batch of states.
Returns:
Dictionary with information to track.
"""
actions, log_probs = self.actor(
states, sample=True, with_log_probs=True)
q1, q2 = self.critic(states, actions)
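    # Soft value target: V(s) = min(Q1, Q2) - alpha * log pi(a|s)  (SAC v1).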
q = tf.minimum(q1, q2) - self.alpha * log_probs
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.value.trainable_variables)
v = self.value(states)
value_loss = tf.losses.mean_squared_error(q, v)
grads = tape.gradient(value_loss, self.value.trainable_variables)
self.value_optimizer.apply_gradients(
zip(grads, self.value.trainable_variables))
if self.value_optimizer.iterations % self.target_update_period == 0:
critic.soft_update(self.value, self.value_target, tau=self.tau)
return {
'v': tf.reduce_mean(v),
'value_loss': value_loss
}
def fit_critic(self, states, actions,
next_states, rewards,
discounts):
"""Updates critic parameters.
Args:
states: Batch of states.
actions: Batch of actions.
next_states: Batch of next states.
rewards: Batch of rewards.
discounts: Batch of masks indicating the end of the episodes.
Returns:
Dictionary with information to track.
"""
next_v = self.value_target(next_states)
target_q = rewards + self.discount * discounts * next_v
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.critic.trainable_variables)
q1, q2 = self.critic(states, actions)
critic_loss = (tf.losses.mean_squared_error(target_q, q1) +
tf.losses.mean_squared_error(target_q, q2))
critic_grads = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(
zip(critic_grads, self.critic.trainable_variables))
return {
'q1': tf.reduce_mean(q1),
'q2': tf.reduce_mean(q2),
'critic_loss': critic_loss
}
@property
def alpha(self):
return tf.exp(self.log_alpha)
def fit_actor(self, states):
"""Updates critic parameters.
Args:
states: A batch of states.
Returns:
Actor loss.
"""
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.actor.trainable_variables)
actions, log_probs = self.actor(states, sample=True, with_log_probs=True)
q1, q2 = self.critic(states, actions)
q = tf.minimum(q1, q2)
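      # Policy loss: E[ alpha * log pi(a|s) - min(Q1, Q2) ].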
actor_loss = tf.reduce_mean(self.alpha * log_probs - q)
actor_grads = tape.gradient(actor_loss, self.actor.trainable_variables)
self.actor_optimizer.apply_gradients(
zip(actor_grads, self.actor.trainable_variables))
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch([self.log_alpha])
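      # Temperature loss: adjusts alpha so policy entropy tracks target_entropy.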
alpha_loss = tf.reduce_mean(self.alpha *
(-log_probs - self.target_entropy))
alpha_grads = tape.gradient(alpha_loss, [self.log_alpha])
self.alpha_optimizer.apply_gradients(zip(alpha_grads, [self.log_alpha]))
return {
'actor_loss': actor_loss,
'alpha': self.alpha,
'alpha_loss': alpha_loss
}
@tf.function
def update_step(self, replay_buffer_iter):
"""Performs a single training step for critic and actor.
Args:
replay_buffer_iter: An tensorflow graph iteratable object.
Returns:
Dictionary with losses to track.
"""
states, actions, rewards, discounts, next_states = next(replay_buffer_iter)
value_dict = self.fit_value(states)
critic_dict = self.fit_critic(states, actions, next_states, rewards,
discounts)
actor_dict = self.fit_actor(states)
return {**value_dict, **actor_dict, **critic_dict}
@tf.function
def act(self, states):
return self.actor(states, sample=False)
def save_weights(self, path):
pass
def load_weights(self, path):
pass
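# A minimal usage sketch (hypothetical names; assumes tf_agents-style specs and
# a replay buffer iterator yielding (states, actions, rewards, discounts,
# next_states) batches):
#
#   agent = SAC(observation_spec, action_spec, target_entropy=-action_dim)
#   for _ in range(num_train_steps):
#     info = agent.update_step(replay_buffer_iter)
#   actions = agent.act(states)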
# --- catalog-be/src/main/resources/scripts/import/tosca/importNormativeInterfaceLifecycleTypes.py (onapdemo/sdc, Apache-2.0) ---
import pycurl
import sys, getopt
from StringIO import StringIO
import json
import copy
from importNormativeElements import createNormativeElement
from importCommon import *
################################################################################################################################################
#                                                                                                                                              #
#   Import the normative interface lifecycle types                                                                                            #
#                                                                                                                                              #
#   activation :                                                                                                                              #
#       python importNormativeInterfaceLifecycleTypes.py [-i <be host> | --ip=<be host>] [-p <be port> | --port=<be port> ]                   #
#                                                        [-u <user userId> | --user=<user userId> ]                                           #
#                                                                                                                                              #
#   shortest activation (be host = localhost, be port = 8080):                                                                                 #
#       python importNormativeInterfaceLifecycleTypes.py                                                                                      #
#                                                                                                                                              #
################################################################################################################################################
def usage():
print sys.argv[0], '[-i <be host> | --ip=<be host>] [-p <be port> | --port=<be port> ] [-u <user userId> | --user=<user userId> ]'
def importNormativeInterfaceLifecycleType(beHost, bePort, adminUser, exitOnSuccess, fileDir):
result = createNormativeElement(beHost, bePort, adminUser, fileDir, "/sdc2/rest/v1/catalog/uploadType/interfaceLifecycle", "interfaceLifecycleTypes", "interfaceLifecycleTypeZip")
printFrameLine()
printNameAndReturnCode(result[0], result[1])
printFrameLine()
if ( result[1] == None or result[1] not in [200, 201, 409] ):
errorAndExit(1, None)
else:
if (exitOnSuccess == True):
errorAndExit(0, None)
def main(argv):
print 'Number of arguments:', len(sys.argv), 'arguments.'
beHost = 'localhost'
bePort = '8080'
adminUser = 'jh0003'
try:
        opts, args = getopt.getopt(argv,"i:p:u:h",["ip=","port=","user="])
except getopt.GetoptError:
usage()
errorAndExit(2, 'Invalid input')
for opt, arg in opts:
#print opt, arg
if opt == '-h':
usage()
sys.exit(3)
elif opt in ("-i", "--ip"):
beHost = arg
elif opt in ("-p", "--port"):
bePort = arg
elif opt in ("-u", "--user"):
adminUser = arg
print 'be host =',beHost,', be port =', bePort,', user =', adminUser
if ( beHost == None ):
usage()
sys.exit(3)
importNormativeInterfaceLifecycleType(beHost, bePort, adminUser, True, "../../../import/tosca/interface-lifecycle-types//")
if __name__ == "__main__":
main(sys.argv[1:])
# --- cold_posterior_bnn/core/prior.py (xxdreck/google-research, Apache-2.0) ---
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Priors for neural network parameters.
This file provides probabilistic priors usable with Keras's regularization API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
tfd = tfp.distributions
class PriorRegularizer(tf.keras.regularizers.Regularizer):
"""Base class for regularizers based on proper priors."""
def __init__(self, weight=1.0):
"""Initialize prior regularizer.
Args:
weight: Tensor, scalar, float, >=0.0, the negative log-likelihood is
multiplied with this weight factor. This can be used, for example,
to ensure the log-prior is appropriately scaled with the total sample
size in Bayesian neural networks: weight=1.0/total_sample_size.
"""
self.weight = weight
def get_config(self):
return {'weight': self.weight}
def logpdf(self, x):
raise NotImplementedError('Derived classes need to implement '
'logpdf method.')
def __call__(self, w):
raise NotImplementedError('Derived classes need to implement '
'__call__ method.')
class NormalRegularizer(PriorRegularizer):
"""Zero mean Normal prior."""
def __init__(self, stddev=0.1, **kwargs):
"""Initialize a Normal prior.
Args:
stddev: Tensor, scalar, the standard deviation of the Normal prior.
**kwargs: keyword arguments passed to base class.
"""
self.stddev = stddev
super(NormalRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(NormalRegularizer, self).get_config()
config.update({'stddev': self.stddev})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
reg = self(x)
nelem = tf.cast(tf.size(x), x.dtype)
logz = nelem * (-math.log(self.stddev) - 0.5*math.log(2.0*math.pi))
ll = -reg + self.weight*logz # weight already in reg
return ll
def __call__(self, x):
return 0.5*self.weight*tf.reduce_sum(tf.square(x / self.stddev))
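# A hedged usage sketch: the priors attach through Keras's regularizer API;
# the name total_sample_size below is hypothetical:
#
#   layer = tf.keras.layers.Dense(
#       64, kernel_regularizer=NormalRegularizer(
#           stddev=0.1, weight=1.0/total_sample_size))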
class ShiftedNormalRegularizer(PriorRegularizer):
"""Normal prior with non-zero mean.
"""
def __init__(self, mean=0, stddev=0.1, **kwargs):
"""Initialize a Normal prior.
The typical use case of this prior is to center the prior around a point
estimate (e.g., obtained by SGD). This leads to a posthoc non-informative
prior in the vicinity of a mode (point estimate).
Use the method utils.center_shifted_normal_around_model_weights(model) to
center all ShiftedNormalRegularizer in the given model around current
model weights.
Args:
mean: Tensor, multi-dimensional or scalar, the mean of the Normal prior.
If both mean and stddev are multi-dimensional tensors they must have
        the same shape.
stddev: Tensor, multi-dimensional or scalar, the standard deviation of
the Normal prior. If both mean and stddev are multi-dimensional
        tensors they must have the same shape.
**kwargs: keyword arguments passed to base class.
"""
self.mean = mean
self.stddev = stddev
super(ShiftedNormalRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(ShiftedNormalRegularizer, self).get_config()
config.update({'mean': self.mean})
config.update({'stddev': self.stddev})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
reg = self(x)
nelem = tf.cast(tf.size(x), x.dtype)
logz = nelem * (-math.log(self.stddev) - 0.5*math.log(2.0*math.pi))
ll = -reg + self.weight*logz # weight already in reg
return ll
def __call__(self, x):
return 0.5 * self.weight * tf.reduce_sum(
tf.square((x - self.mean) / self.stddev))
class StretchedNormalRegularizer(PriorRegularizer):
"""Stretched Normal regularization."""
def __init__(self, offset=1.0, scale=1.0, **kwargs):
"""Stretched Normal prior regularization.
The stretched Normal distribution has a flat part in the middle and then
Gaussian tails to the left and right. The univariate normalized density
function is determined by an offset and a scale parameter as follows:
      p(x; offset, scale) = exp(-0.5*((|x|-offset)^2)/scale^2)
                            / (2*offset + sqrt(2*pi*scale^2)),   if |x| >= offset,
                          = 1.0 / (2*offset + sqrt(2*pi*scale^2)),  if |x| < offset.
For offset=0 this distribution becomes a zero mean Normal(0,scale^2)
distribution.
Args:
offset: float, >= 0.0, the offset at which the Gaussian tails start.
scale: float, > 0.0, the Normal tail standard deviation.
**kwargs: keyword arguments passed to base class.
"""
self.offset = offset
self.scale = scale
super(StretchedNormalRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(StretchedNormalRegularizer, self).get_config()
config.update({'offset': self.offset,
'scale': self.scale})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
reg = self(x)
nelem = tf.cast(tf.size(x), x.dtype)
logz = nelem * (-math.log(
2.0*self.offset + self.scale*math.sqrt(2.0*math.pi)))
ll = -reg + self.weight*logz
return ll
def __call__(self, x):
diff = tf.abs(x) - self.offset
logp = tf.where(diff >= 0.0,
-0.5*tf.square(diff / self.scale),
tf.zeros_like(diff))
# Do not normalize (not necessary as not dependent on x)
# logp = logp - tf.math.log(2.0*self.offset +
# self.scale*math.sqrt(2.0*math.pi))
regularization = -self.weight * tf.reduce_sum(logp)
return regularization
def _compute_fans(shape, data_format='channels_last'):
"""Computes the number of input and output units for a weight shape.
Arguments:
shape: Integer shape tuple.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
Returns:
A tuple of scalars, `(fan_in, fan_out)`.
Raises:
ValueError: in case of invalid `data_format` argument.
"""
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) in {3, 4, 5}:
# Assuming convolution kernels (1D, 2D or 3D).
# TH kernel shape: (depth, input_depth, ...)
# TF kernel shape: (..., input_depth, depth)
if data_format == 'channels_first':
receptive_field_size = tf.reduce_prod(shape[2:])
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
elif data_format == 'channels_last':
receptive_field_size = tf.reduce_prod(shape[:-2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
else:
raise ValueError('Invalid data_format: ' + data_format)
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif not shape: # len(shape) == 0
fan_in = fan_out = 1
else:
raise NotImplementedError()
return fan_in, fan_out
def _he_stddev(fan_in):
"""He-stddev scaling rule based on fan-in.
The original He-scaling, see Section 2.2 in
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
This initialization also has the "edge of chaos" property if the ReLU
activation is used, see Figure 4(b) in https://arxiv.org/pdf/1711.00165.pdf,
and thus maximizes information propagation.
Args:
fan_in: int, or int Tensor, >= 1.
Returns:
stddev: He scaling standard deviation.
"""
fan_in = tf.cast(fan_in, 'float32')
return tf.sqrt(2.0/fan_in)
class HeNormalRegularizer(PriorRegularizer):
"""He-inspired Normal regularization."""
def __init__(self, scale=1.0,
data_format='channels_last', **kwargs):
"""Initialize a He Normal prior.
Args:
scale: float, > 0.0, the He standard deviation is scaled with this factor.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
**kwargs: keyword arguments passed to base class.
"""
self.scale = scale
self.data_format = data_format
super(HeNormalRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(HeNormalRegularizer, self).get_config()
config.update({'scale': self.scale,
'data_format': self.data_format})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
raise NotImplementedError('logpdf not implemented.')
def __call__(self, x):
fan_in, _ = _compute_fans(x.shape, self.data_format)
stddev = self.scale*_he_stddev(fan_in)
reg_lambda = 0.5 * self.weight / (stddev**2.0)
regularization = reg_lambda * tf.reduce_sum(tf.square(x))
return regularization
class GlorotNormalRegularizer(PriorRegularizer):
"""Glorot-inspired Normal regularization."""
def __init__(self, scale=1.0,
data_format='channels_last', **kwargs):
"""Initialize a Glorot Normal prior.
Args:
scale: float, > 0.0, the Glorot standard deviation is scaled with this
factor.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
**kwargs: keyword arguments passed to base class.
"""
self.scale = scale
self.data_format = data_format
super(GlorotNormalRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(GlorotNormalRegularizer, self).get_config()
        config.update({'scale': self.scale,
                       'data_format': self.data_format})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
raise NotImplementedError('logpdf not implemented.')
def __call__(self, x):
fan_in, fan_out = _compute_fans(x.shape, self.data_format)
def glorot_stddev(fan_in, fan_out):
fan_in = tf.cast(fan_in, 'float32')
fan_out = tf.cast(fan_out, 'float32')
return tf.sqrt(1.0/(0.5*(fan_in+fan_out)))
stddev = self.scale*glorot_stddev(fan_in, fan_out)
reg_lambda = 0.5 * self.weight / (stddev**2.0)
regularization = reg_lambda * tf.reduce_sum(tf.square(x))
return regularization
class LaplaceRegularizer(PriorRegularizer):
"""Zero mean Laplace prior."""
def __init__(self, stddev=0.1, **kwargs):
"""Initialize a Laplace prior.
Args:
stddev: Tensor, scalar, the standard deviation of the Laplace prior.
**kwargs: keyword arguments passed to base class.
"""
self.stddev = stddev
super(LaplaceRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(LaplaceRegularizer, self).get_config()
config.update({'stddev': self.stddev})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
reg = self(x)
nelem = tf.cast(tf.size(x), x.dtype)
logz = nelem * (-math.log(math.sqrt(2.0)*self.stddev))
ll = -reg + self.weight*logz
return ll
def __call__(self, x):
laplace_b = self.stddev / math.sqrt(2.0)
return self.weight*tf.reduce_sum(tf.abs(x / laplace_b))
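# Added note: since laplace_b = stddev/sqrt(2), the call above is plain L1
# regularization with coefficient weight*sqrt(2)/stddev. A quick numeric check:
def _laplace_l1_equivalence_check(x, stddev=0.1, weight=1.0):
    reg = LaplaceRegularizer(stddev=stddev, weight=weight)
    l1 = weight * math.sqrt(2.0) / stddev * tf.reduce_sum(tf.abs(x))
    return reg(x), l1  # equal up to floating point error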
class CauchyRegularizer(PriorRegularizer):
"""Zero mean Cauchy prior."""
def __init__(self, scale=1.0, **kwargs):
"""Initialize a Cauchy prior.
The [standard Cauchy
distribution](https://en.wikipedia.org/wiki/Cauchy_distribution)
contains a location and scale parameter. Here we fix the location to zero.
Args:
scale: float, > 0.0, the scale parameter of a zero-mean Cauchy
distribution.
**kwargs: keyword arguments passed to base class.
"""
self.scale = scale
super(CauchyRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(CauchyRegularizer, self).get_config()
config.update({'scale': self.scale})
return config
def logpdf(self, x):
"""Return the log pdf of the density times weight."""
reg = self(x)
nelem = tf.cast(tf.size(x), x.dtype)
logz = nelem * (-math.log(math.pi*self.scale))
ll = -reg + self.weight*logz
return ll
def __call__(self, x):
nll = tf.reduce_sum(tf.math.log1p(tf.math.square(x / self.scale)))
regularization = self.weight * nll
return regularization
class SpikeAndSlabRegularizer(PriorRegularizer):
"""Normal Spike-and-Slab prior."""
def __init__(self, scale_spike=0.001, scale_slab=0.4,
mass_spike=0.5, **kwargs):
"""Initialize a spike-and-slab prior.
Args:
scale_spike: Tensor, scalar, >0.0, the standard deviation of the Normal
spike component.
scale_slab: Tensor, scalar, >0.0, the standard deviation of the Normal
slab component.
mass_spike: Tensor, scalar, >0.0, <1.0, the probability mass associated
with the spike component.
**kwargs: keyword arguments passed to base class.
"""
self.scale_spike = scale_spike
self.scale_slab = scale_slab
self.mass_spike = mass_spike
super(SpikeAndSlabRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(SpikeAndSlabRegularizer, self).get_config()
config.update({'scale_spike': self.scale_spike,
'scale_slab': self.scale_slab,
'mass_spike': self.mass_spike})
return config
def logpdf(self, x):
return -self(x)
def __call__(self, w):
pss = tfd.Mixture(
cat=tfd.Categorical(
probs=[self.mass_spike, 1.0-self.mass_spike]),
components=[
tfd.Normal(loc=0.0, scale=self.scale_spike),
tfd.Normal(loc=0.0, scale=self.scale_slab)])
logp = tf.reduce_sum(pss.log_prob(w))
return -self.weight*logp
def inverse_gamma_shape_scale_from_mean_stddev(mean, stddev):
"""Compute inverse Gamma shape and scale from mean and standard deviation.
Args:
mean: Tensor, scalar, >0.0, the mean of the Inverse Gamma variate.
stddev: Tensor, scalar, >0.0, the standard deviation of the Inverse Gamma
variate.
Returns:
ig_shape: Tensor, scalar, >0.0, the inverse Gamma shape parameter.
ig_scale: Tensor, scalar, >0.0, the inverse Gamma scale parameter.
"""
cvar = (mean / stddev)**2.0
ig_shape = cvar + 2.0
ig_scale = mean*(cvar + 1.0)
return ig_shape, ig_scale
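# Added round-trip check: for InverseGamma(shape, scale) with shape > 2,
# E[v] = scale/(shape - 1) and Var[v] = scale^2/((shape - 1)^2 (shape - 2)),
# so the conversion above inverts exactly.
def _inverse_gamma_round_trip_check(mean=0.5, stddev=0.25):
    ig_shape, ig_scale = inverse_gamma_shape_scale_from_mean_stddev(mean, stddev)
    mean_back = ig_scale / (ig_shape - 1.0)
    stddev_back = math.sqrt(
        ig_scale**2.0 / ((ig_shape - 1.0)**2.0 * (ig_shape - 2.0)))
    return mean_back, stddev_back  # == (mean, stddev) up to float error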
class EmpiricalBayesNormal(PriorRegularizer):
r"""Empirical Bayes Normal prior.
#### Mathematical details
We assume a hierarchical prior:
1. v ~ InverseGamma(ig_shape, ig_scale)
2. w_i ~ Normal(0, v), i=1,..,n.
We then define the empirical Bayes choice
\\(
  v_* = \frac{\mathrm{ig\_scale} + \frac{1}{2} \sum_i w_i^2}{\mathrm{ig\_shape} + n/2 + 1},
\\)
and use the empirical Bayes prior \\(p(w) := \prod_i Normal(w_i; 0, v_*).\\)
Note that this is not guaranteed to be a proper prior for n == 1.
"""
def __init__(self, ig_shape=2.01, ig_scale=0.101, **kwargs):
r"""Construct an empirical Bayes Normal regularizer.
Args:
ig_shape: Tensor, scalar, float, >0.0, the shape parameter of the inverse
Gamma distribution.
ig_scale: Tensor, scalar, float, >0.0, the scale parameter of the
inverse Gamma distribution.
**kwargs: keyword arguments passed to base class.
"""
self.ig_shape = ig_shape
self.ig_scale = ig_scale
super(EmpiricalBayesNormal, self).__init__(**kwargs)
def get_config(self):
config = super(EmpiricalBayesNormal, self).get_config()
config.update({'ig_shape': float(self.ig_shape),
'ig_scale': float(self.ig_scale)})
return config
@staticmethod
def from_stddev(stddev, weight=1.0):
"""Create Empirical Bayes Normal prior with specified marginal mean stddev.
The distribution is constructed as:
1. v ~ InverseGamma(ig_shape, ig_scale)
So that E[v] = Var[v] = stddev^2, and
2. w_i ~ Normal(0, v), i=1,...,n.
Args:
      stddev: Tensor, scalar, float, >0.0, the square root of the marginal
        mean variance E[v] of the distribution.
weight: Tensor, scalar, float, >=0.0, the negative log-likelihood is
multiplied with this weight factor. This can be used, for example,
to ensure the log-prior is appropriately scaled with the total sample
size in Bayesian neural networks: weight=1.0/total_sample_size.
Returns:
prior: EmpiricalBayesNormal prior with suitable parameters.
"""
variance = stddev**2.0
ig_shape, ig_scale = inverse_gamma_shape_scale_from_mean_stddev(
variance, stddev)
return EmpiricalBayesNormal(ig_shape, ig_scale, weight=weight)
def __call__(self, w):
w2sum = tf.reduce_sum(tf.square(w))
n = tf.cast(tf.size(w), tf.float32)
# Posterior variance estimate
vhat = (self.ig_scale + 0.5*w2sum) / (self.ig_shape + 0.5*n + 1.0)
vhatsqrt = tf.math.sqrt(vhat)
logp = -0.5*n*tf.math.log(2.0*math.pi)
logp += -0.5*n*tf.math.log(vhat)
logp += -0.5*tf.reduce_sum(tf.square(w / vhatsqrt))
return -self.weight * logp
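# Added derivation note: by Normal/inverse-Gamma conjugacy, the posterior
# p(v | w) is InverseGamma(ig_shape + n/2, ig_scale + (1/2) sum_i w_i^2).
# The mode of InverseGamma(a, b) is b/(a + 1), which gives exactly the vhat
# computed above:
#   vhat = (ig_scale + 0.5*sum_i w_i^2) / (ig_shape + n/2 + 1),
# matching the v_* formula in the class docstring.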
class HeNormalEBRegularizer(PriorRegularizer):
"""He-inspired Normal Empirical Bayes regularization."""
def __init__(self, scale=1.0, data_format='channels_last', **kwargs):
"""Initialize a He Normal empirical Bayes prior.
The empirical Bayes regularization is constructed as:
1. v ~ InverseGamma(ig_shape, ig_scale)
Where ig_shape and ig_scale are chosen such that
E[v] = Var[v] = (scale * he_stddev)^2, and
2. w_i ~ Normal(0, v), i=1,...,n.
The regularization is then
- sum_i log Normal(w_i; 0, vhat(w)),
where
vhat(w) := argmax_v p(v | w) under the model above.
We can solve vhat(w) analytically because of conjugacy in the above model.
For tf.size(w) >= 2 the induced prior p*(w) is normalizable.
Args:
scale: float, > 0.0, the He standard deviation is scaled with this factor.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
**kwargs: keyword arguments passed to base class.
"""
self.scale = scale
self.data_format = data_format
super(HeNormalEBRegularizer, self).__init__(**kwargs)
def get_config(self):
config = super(HeNormalEBRegularizer, self).get_config()
config.update({'scale': self.scale,
'data_format': self.data_format})
return config
def logpdf(self, w):
return -self(w)
def __call__(self, w):
n = tf.size(w)
n = tf.cast(n, tf.float32)
fan_in, _ = _compute_fans(w.shape, self.data_format)
stddev = self.scale*_he_stddev(fan_in)
variance = stddev**2.0
ig_shape, ig_scale = inverse_gamma_shape_scale_from_mean_stddev(
variance, variance)
w2sum = tf.reduce_sum(tf.square(w))
# Posterior variance estimate
vhat = (ig_scale + 0.5*w2sum) / (ig_shape + 0.5*n + 1.0)
vhatsqrt = tf.math.sqrt(vhat)
logp = -0.5*n*tf.math.log(2.0*math.pi)
logp += -0.5*n*tf.math.log(vhat)
logp += -0.5*tf.reduce_sum(tf.square(w / vhatsqrt))
return -self.weight * logp
| 32.811994
| 108
| 0.66614
|
838a7fb0634bde4f43583cd160f50f00a008ca27
| 1,304
|
py
|
Python
|
app/systems/manager.py
|
venturiscm/hcp
|
74ad18180822301274daa9218d7bd9fbdb7807f7
|
[
"Apache-2.0"
] | 1
|
2020-06-22T21:25:52.000Z
|
2020-06-22T21:25:52.000Z
|
app/systems/manager.py
|
venturiscm/hcp
|
74ad18180822301274daa9218d7bd9fbdb7807f7
|
[
"Apache-2.0"
] | 1
|
2020-05-21T02:46:24.000Z
|
2020-05-25T07:19:23.000Z
|
app/systems/manager.py
|
venturiscm/hcp
|
74ad18180822301274daa9218d7bd9fbdb7807f7
|
[
"Apache-2.0"
] | null | null | null |
from systems.manage import service, runtime, template, task
from systems.indexer import Indexer
from utility.terminal import TerminalMixin
from utility.environment import Environment
import pathlib
import copy
import logging
logger = logging.getLogger(__name__)
class Manager(
TerminalMixin,
service.ManagerServiceMixin,
runtime.ManagerRuntimeMixin,
task.ManagerTaskMixin,
template.ManagerTemplateMixin
):
def __init__(self):
self.env = Environment.get_env()
super().__init__()
pathlib.Path(self.module_dir).mkdir(parents = True, exist_ok = True)
self.index = Indexer(self)
self.index.register_core_module()
self.index.update_search_path()
self.index.collect_environment()
def cleanup(self):
super().cleanup()
def get_spec(self, location = None, default = None):
spec = self.index.spec
if location is None:
return spec
if isinstance(location, str):
location = location.split('.')
if default is None:
default = {}
for index, element in enumerate(location):
inner_default = default if index == len(location) - 1 else {}
spec = spec.get(element, inner_default)
return copy.deepcopy(spec)
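# Illustrative sketch (added); the key names below are hypothetical:
#
#   manager = Manager()
#   manager.get_spec('plugins.command.base', default={})
#   # equivalent to:
#   manager.get_spec(['plugins', 'command', 'base'])
#
# Dotted lookups walk nested spec dictionaries, and `default` is applied only
# at the final path element.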
| 24.603774
| 76
| 0.660276
|
535f29a4c3963b73d684cb4ff1ab2ca104369768
| 365
|
py
|
Python
|
stochastic/__version__.py
|
Gabinou/NHPPy
|
1068b1548d008771a58d5479d8333703c54abbed
|
[
"MIT"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
stochastic/__version__.py
|
noisyoscillator/stochastic
|
168659c36fd16a33f69b1f21654a7661286dc9d0
|
[
"MIT"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
stochastic/__version__.py
|
noisyoscillator/stochastic
|
168659c36fd16a33f69b1f21654a7661286dc9d0
|
[
"MIT"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
"""Version information."""
__title__ = "stochastic"
__description__ = "Stochastic process realizations."
__url__ = "https://github.com/crflynn/stochastic"
__version__ = "0.4.0"
__author__ = "Christopher Flynn"
__author_email__ = "crf204@gmail.com"
__license__ = "MIT"
__copyright__ = "Copyright 2018 Christopher Flynn"
__docs_copyright__ = "2018 Christopher Flynn"
| 33.181818
| 52
| 0.775342
|
b94f2d35a0eeb14e802a026705bc8c44f04c58e9
| 137
|
py
|
Python
|
teradata/datadog_checks/teradata/__about__.py
|
OuesFa/integrations-core
|
0ffe4ca306580a2e775b515152384034c2dfdc03
|
[
"BSD-3-Clause"
] | null | null | null |
teradata/datadog_checks/teradata/__about__.py
|
OuesFa/integrations-core
|
0ffe4ca306580a2e775b515152384034c2dfdc03
|
[
"BSD-3-Clause"
] | null | null | null |
teradata/datadog_checks/teradata/__about__.py
|
OuesFa/integrations-core
|
0ffe4ca306580a2e775b515152384034c2dfdc03
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '1.0.0'
| 27.4
| 59
| 0.722628
|
2fb2e3f7e59405ddc15d522624d3772b2b5bef5f
| 571
|
py
|
Python
|
uri/Estruturas e Bibliotecas/1022.py
|
vieirafrancisco/daily
|
78a288290bcb222c8f0ac0e407a753e65dadec77
|
[
"MIT"
] | null | null | null |
uri/Estruturas e Bibliotecas/1022.py
|
vieirafrancisco/daily
|
78a288290bcb222c8f0ac0e407a753e65dadec77
|
[
"MIT"
] | null | null | null |
uri/Estruturas e Bibliotecas/1022.py
|
vieirafrancisco/daily
|
78a288290bcb222c8f0ac0e407a753e65dadec77
|
[
"MIT"
] | null | null | null |
def mdc(a, b):
if b == 0:
return a
return mdc(b, a % b)
def solve(n1, d1, op, n2, d2):
if op == "+":
x = n1*d2 + n2*d1
y = d1*d2
elif op == "-":
x = n1*d2 - n2*d1
y = d1*d2
elif op == "/":
x = n1*d2
y = n2*d1
elif op == "*":
x = n1*n2
y = d1*d2
return x, y
n = int(input())
for _ in range(n):
n1, _, d1, op, n2, _, d2 = input().split()
x, y = solve(int(n1), int(d1), op, int(n2), int(d2))
m = mdc(x, y)
print("{}/{} = {}/{}".format(x, y, x//m, y//m))
| 20.392857
| 56
| 0.388792
|
76d8fb97b9f15ca0d0f887f731012778d373f7cb
| 3,412
|
py
|
Python
|
src/poliastro/frames/ecliptic.py
|
sundeshgupta/poliastro
|
0a269d43c8a082df3323d38ce73f5e1ae3262ccd
|
[
"MIT"
] | 1
|
2019-04-12T12:28:37.000Z
|
2019-04-12T12:28:37.000Z
|
src/poliastro/frames/ecliptic.py
|
sundeshgupta/poliastro
|
0a269d43c8a082df3323d38ce73f5e1ae3262ccd
|
[
"MIT"
] | 1
|
2018-02-22T08:06:06.000Z
|
2018-02-22T08:06:06.000Z
|
src/poliastro/frames/ecliptic.py
|
sundeshgupta/poliastro
|
0a269d43c8a082df3323d38ce73f5e1ae3262ccd
|
[
"MIT"
] | null | null | null |
import numpy as np
from astropy import _erfa as erfa, units as u
from astropy.coordinates import (
BaseEclipticFrame,
CartesianRepresentation,
DynamicMatrixTransform,
GeocentricMeanEcliptic,
HeliocentricEclipticIAU76 as HeliocentricEclipticJ2000,
TimeAttribute,
UnitSphericalRepresentation,
frame_transform_graph,
get_body,
)
from astropy.coordinates.builtin_frames.utils import DEFAULT_OBSTIME, get_jd12
from astropy.coordinates.matrix_utilities import (
matrix_product,
matrix_transpose,
rotation_matrix,
)
from .equatorial import GCRS
__all__ = [
"GeocentricSolarEcliptic",
"GeocentricMeanEcliptic",
"HeliocentricEclipticJ2000",
]
class GeocentricSolarEcliptic(BaseEclipticFrame):
"""
This system has its X axis towards the Sun and its Z axis perpendicular to
the plane of the Earth's orbit around the Sun (positive North). This system
is fixed with respect to the Earth-Sun line. It is convenient for specifying
magnetospheric boundaries. It has also been widely adopted as the system for
representing vector quantities in space physics databases.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@frame_transform_graph.transform(DynamicMatrixTransform, GCRS, GeocentricSolarEcliptic)
def gcrs_to_geosolarecliptic(gcrs_coo, to_frame):
if not to_frame.obstime.isscalar:
raise ValueError(
"To perform this transformation the obstime Attribute must be a scalar."
)
_earth_orbit_perpen_point_gcrs = UnitSphericalRepresentation(
lon=0 * u.deg, lat=(90 * u.deg - _obliquity_rotation_value(to_frame.obstime))
)
_earth_detilt_matrix = _make_rotation_matrix_from_reprs(
_earth_orbit_perpen_point_gcrs, CartesianRepresentation(0, 0, 1)
)
sun_pos_gcrs = get_body("sun", to_frame.obstime).cartesian
earth_pos_gcrs = get_body("earth", to_frame.obstime).cartesian
sun_earth = sun_pos_gcrs - earth_pos_gcrs
sun_earth_detilt = sun_earth.transform(_earth_detilt_matrix)
# Earth-Sun Line in Geocentric Solar Ecliptic Frame
x_axis = CartesianRepresentation(1, 0, 0)
rot_matrix = _make_rotation_matrix_from_reprs(sun_earth_detilt, x_axis)
return matrix_product(rot_matrix, _earth_detilt_matrix)
@frame_transform_graph.transform(DynamicMatrixTransform, GeocentricSolarEcliptic, GCRS)
def geosolarecliptic_to_gcrs(from_coo, gcrs_frame):
return matrix_transpose(gcrs_to_geosolarecliptic(gcrs_frame, from_coo))
def _obliquity_rotation_value(equinox):
"""
Function to calculate obliquity of the earth.
This uses obl06 of erfa.
"""
jd1, jd2 = get_jd12(equinox, "tt")
obl = erfa.obl06(jd1, jd2) * u.radian
return obl.to(u.deg)
def _make_rotation_matrix_from_reprs(start_representation, end_representation):
"""
Return the matrix for the direct rotation from one representation to a second representation.
The representations need not be normalized first.
"""
A = start_representation.to_cartesian()
B = end_representation.to_cartesian()
rotation_axis = A.cross(B)
rotation_angle = -np.arccos(
A.dot(B) / (A.norm() * B.norm())
) # negation is required
# This line works around some input/output quirks of Astropy's rotation_matrix()
matrix = np.array(rotation_matrix(rotation_angle, rotation_axis.xyz.value.tolist()))
return matrix
| 33.126214
| 97
| 0.753224
|
229dc3f84cb7ea39964576acfffd98427bf1469c
| 4,440
|
py
|
Python
|
archive/Model/rnn/time_series_shampoo/time_series.py
|
KrisCheng/Hitchhiker-Guide-to-Machine-Learning
|
676edabc8690727b22189536b28de3e2dad0f08c
|
[
"MIT"
] | 4
|
2018-01-05T02:54:18.000Z
|
2018-09-02T10:05:54.000Z
|
archive/Model/rnn/time_series_shampoo/time_series.py
|
KrisCheng/Hitchhiker-Guide-to-Machine-Learning
|
676edabc8690727b22189536b28de3e2dad0f08c
|
[
"MIT"
] | null | null | null |
archive/Model/rnn/time_series_shampoo/time_series.py
|
KrisCheng/Hitchhiker-Guide-to-Machine-Learning
|
676edabc8690727b22189536b28de3e2dad0f08c
|
[
"MIT"
] | 2
|
2017-10-02T11:43:54.000Z
|
2017-10-07T10:52:22.000Z
|
'''
Desc: time series forecasting with LSTM in Python.
Author: Kris Peng
Ref: https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
Copyright (c) 2017 - Kris Peng <kris.dacpc@gmail.com>
'''
# load and plot dataset
from pandas import read_csv
from pandas import datetime
from pandas import DataFrame
from pandas import concat
from pandas import Series
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
import numpy
# load dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# trans a sequence problem to a supervised learning problem
def timeseries_to_supervised(data, lag = 1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag + 1)]
columns.append(df)
df = concat(columns, axis = 1)
df.fillna(0, inplace = True)
return df
# create a differenced series
def difference(dataset, interval = 1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval = 1):
return yhat + history[-interval]
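# Added round-trip note: differencing and then inverting recovers the original
# series, e.g. with series = [10, 12, 15]:
#   diff = difference(series, 1)                # Series([2, 3])
#   inverse_difference(series[:2], diff[1], 1)  # 12 + 3 = 15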
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range = (-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = numpy.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape = (batch_size, X.shape[1], X.shape[2]), stateful = True))
model.add(Dense(1))
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
for i in range(nb_epoch):
model.fit(X, y, epochs = 1, batch_size = batch_size, verbose = 0, shuffle = False)
model.reset_states()
return model
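# Added note: the manual epoch loop above is needed because stateful=True
# keeps the LSTM state across batches; reset_states() clears it between
# epochs, while shuffle=False preserves the sample order within each epoch.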
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
# the prediction
yhat = model.predict(X, batch_size = batch_size)
return yhat[0,0]
# load dataset
series = read_csv('shampoo-sales.csv', header = 0, parse_dates = [0], index_col = 0, squeeze = True, date_parser = parser)
# transform data to be stationary
raw_values = series.values
print(raw_values)
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-12], supervised_values[-12:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# fit the model
lstm_model = fit_lstm(train_scaled, 1, 3, 4)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size = 1)
# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
# make one-step forecast
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = forecast_lstm(lstm_model, 1, X)
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
# store forecast
predictions.append(yhat)
expected = raw_values[len(train) + i + 1]
    print('Month = %d, Predicted = %f, Expected = %f' % (i + 1, yhat, expected))
# report performance
rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
print('Test RMSE: %.3f' % rmse)
pyplot.plot(raw_values[-12:])
pyplot.plot(predictions)
pyplot.show()
| 32.408759
| 122
| 0.707883
|
c8238eae57df440ab497af5eaa4d6c5648f4ce7f
| 785
|
py
|
Python
|
scripts/filter.py
|
jaistark/sp
|
911933c65f950e6bc51451840068ca9249554846
|
[
"BSD-2-Clause"
] | 28
|
2015-03-04T08:34:40.000Z
|
2022-02-13T05:59:11.000Z
|
scripts/filter.py
|
jaistark/sp
|
911933c65f950e6bc51451840068ca9249554846
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/filter.py
|
jaistark/sp
|
911933c65f950e6bc51451840068ca9249554846
|
[
"BSD-2-Clause"
] | 14
|
2015-03-04T08:34:42.000Z
|
2020-12-08T16:13:37.000Z
|
import snap
import sys
# Filter the graph
graph = sys.argv[1]
G = snap.LoadEdgeList(snap.PNGraph, graph)
G_filter = snap.PNGraph.New()
for u_ in G.Nodes():
u = u_.GetId()
G_filter.AddNode(u)
edge_hash = {}
for u_ in G.Nodes():
u = u_.GetId()
for v in u_.GetOutEdges():
for w in u_.GetInEdges():
if (v == w or u == v or w == u):
continue
if not G.IsEdge(v, w):
continue
# w -> v -> u -> w
edge_hash[(v, u)] = 1
edge_hash[(u, w)] = 1
edge_hash[(w, v)] = 1
for u, v in edge_hash:
G_filter.AddEdge(u, v)
G_filter = snap.ConvertGraph(snap.PNGraph, G_filter, True)
out_name = graph.split('.txt')[0] + '-filter.txt'
snap.SaveEdgeList(G_filter, out_name)
| 23.088235
| 58
| 0.547771
|
1aed06b1b529e8b6f9a0b742101535cede810437
| 1,874
|
py
|
Python
|
caesar_cipher.py
|
joejnke/security
|
5e252f26560864f54be64cc89a2d538db1731188
|
[
"MIT"
] | null | null | null |
caesar_cipher.py
|
joejnke/security
|
5e252f26560864f54be64cc89a2d538db1731188
|
[
"MIT"
] | 1
|
2019-04-19T20:23:30.000Z
|
2019-04-19T20:23:30.000Z
|
caesar_cipher.py
|
joejnke/security
|
5e252f26560864f54be64cc89a2d538db1731188
|
[
"MIT"
] | 1
|
2019-04-19T19:56:22.000Z
|
2019-04-19T19:56:22.000Z
|
# A mapping from letter to integer
L2I = dict(zip("ABCDEFGHIJKLMNOPQRSTUVWXYZ",range(26)))
# A mapping from integer to letter
I2L = dict(zip(range(26),"ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
'''-------------------------------------------------------------------------------
Function: Get the encryption or decryption key if it is in range between 1 - 26
Returns: Key
Arguments: None
----------------------------------------------------------------------------------'''
def getKey():
    while True:
        print('Enter the key for encryption or decryption (between 1 - 26)')
        key = int(input())
        if 1 <= key <= 26:
            return key
'''----------------------------------------------------------------------
Function: Encrypts a text input with a given key using caesar cipher
Returns: Ciphered Text
Arguments: None
-------------------------------------------------------------------------'''
def encryptCaesarCipher():
plainText = input("Enter text to be encrypted here:")
key = getKey()
cipherText =''
for letter in plainText.upper():
if letter.isalpha():
cipherText += I2L[ (L2I[letter] + key)%26 ]
else:
cipherText += letter
return cipherText
'''----------------------------------------------------------------------
Function: Decrypts a ciphered text with a given key using caesar cipher
Returns: Plain Text
Arguments: None
-------------------------------------------------------------------------'''
def decryptCaesarCipher():
cipherText = input("Enter text to be decrypted here:")
key = getKey()
print (key)
plainText = ''
for letter in cipherText.upper():
if letter.isalpha():
plainText += I2L[ (L2I[letter] - key)%26 ]
else:
plainText += letter
return plainText
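# Added helper sketch (hypothetical name) mirroring the two loops above
# without console I/O; with key = 3, "HELLO" becomes "KHOOR", and shifting by
# -key restores it. Non-alphabetic characters pass through unchanged.
def caesarShift(text, key):
    return ''.join(I2L[(L2I[letter] + key) % 26] if letter.isalpha() else letter
                   for letter in text.upper())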
| 31.762712
| 85
| 0.468517
|
16600f54e8037c4809e742a712314ab956d42b13
| 1,783
|
py
|
Python
|
tensorflow/python/distribute/reduce_util.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/distribute/reduce_util.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/distribute/reduce_util.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for reduce operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export
@tf_export("distribute.ReduceOp")
class ReduceOp(enum.Enum):
"""Indicates how a set of values should be reduced.
* `SUM`: Add all the values.
* `MEAN`: Take the arithmetic mean ("average") of the values.
"""
# TODO(priyag): Add the following types:
# `MIN`: Return the minimum of all values.
# `MAX`: Return the maximum of all values.
SUM = "SUM"
MEAN = "MEAN"
@staticmethod
def from_variable_aggregation(aggregation):
mapping = {
variable_scope.VariableAggregation.SUM: ReduceOp.SUM,
variable_scope.VariableAggregation.MEAN: ReduceOp.MEAN,
}
reduce_op = mapping.get(aggregation)
if not reduce_op:
raise ValueError("Could not convert from `tf.VariableAggregation` %s to"
"`tf.distribute.ReduceOp` type" % aggregation)
return reduce_op
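# Illustrative usage (added); `strategy` is assumed to be a
# tf.distribute.Strategy instance:
#
#   op = ReduceOp.from_variable_aggregation(
#       variable_scope.VariableAggregation.MEAN)   # -> ReduceOp.MEAN
#   strategy.reduce(ReduceOp.SUM, per_replica_value, axis=None)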
| 34.288462
| 80
| 0.701066
|
e2ee9b13e9081b478b9b03059dbae5f2cec91d82
| 685
|
py
|
Python
|
PlayGround/observer/migrations/0001_initial.py
|
mordsm/PlayGround
|
76734ec1973dc4a3b7c70e01004e1b0891a6c6ed
|
[
"Apache-2.0"
] | null | null | null |
PlayGround/observer/migrations/0001_initial.py
|
mordsm/PlayGround
|
76734ec1973dc4a3b7c70e01004e1b0891a6c6ed
|
[
"Apache-2.0"
] | null | null | null |
PlayGround/observer/migrations/0001_initial.py
|
mordsm/PlayGround
|
76734ec1973dc4a3b7c70e01004e1b0891a6c6ed
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-23 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ActiveData',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=200)),
('reminder', models.CharField(max_length=200)),
('action', models.CharField(max_length=200)),
],
),
]
| 27.4
| 117
| 0.575182
|
9c184a836994027cdc16a010121239da93e614d7
| 2,505
|
py
|
Python
|
deform_jinja2/test.py
|
Weasyl/deform_jinja2
|
5d92efa94ca0028dc99a1183e6dbe0288e62e223
|
[
"MIT"
] | null | null | null |
deform_jinja2/test.py
|
Weasyl/deform_jinja2
|
5d92efa94ca0028dc99a1183e6dbe0288e62e223
|
[
"MIT"
] | null | null | null |
deform_jinja2/test.py
|
Weasyl/deform_jinja2
|
5d92efa94ca0028dc99a1183e6dbe0288e62e223
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import colander
import deform
import deform_jinja2
import deform_jinja2.translator
import unittest
from colander import Invalid, _
from deform.exception import ValidationFailure
from pyramid import testing
from deform_jinja2.translator import PyramidTranslator
import translationstring
class TestTranslation(unittest.TestCase):
def setUp(self):
request = testing.DummyRequest()
settings={'deform_jinja2.template_search_path':'deform_jinja2:uni_templates'}
self.config = testing.setUp(request=request, settings=settings)
self.config.include('deform_jinja2')
def runTest(self):
# runTest() defined to allow TestTranslation() in console
        pass  # pragma: no cover
def test_translation(self):
invalid = None
# From colander.Range():
def translate_error(node, value):
min_err = _(_('${val} is less than minimum value ${min}'),
mapping={'val':'value', 'min':'min'})
raise Invalid(node, min_err)
class TestSchema(colander.MappingSchema):
s = colander.SchemaNode(colander.String(), missing="", validator=translate_error)
f = deform.Form(TestSchema())
try:
a = f.validate([('s', 'invalid')]) # validator not called if missing
except ValidationFailure as e:
invalid = e.render()
assert invalid, "ValidationFailure was not raised"
assert "ctrlHolder" in invalid, "uni-form template was not used"
assert "value is less than minimum value min" in invalid
    def exercise_translator(self, t):
assert t.gettext('term') == 'term'
assert t.ngettext('term', 'terms', 2) == 'terms'
assert t.ngettext(_('term'), _('terms'), 1) == 'term'
assert t.ngettext(_('term'), _('terms'), 2) == 'terms'
def test_pyramid_translator(self):
t = PyramidTranslator()
_ = translationstring.TranslationStringFactory('deform')
        self.exercise_translator(t)
def test_dummy_translator(self):
dt = deform_jinja2.DummyTranslator()
        self.exercise_translator(dt)
def test_default_translator_is_dummy(self):
rf = deform_jinja2.jinja2_renderer_factory() # coverage
assert rf.env.globals['gettext'] == deform_jinja2.DummyTranslator().gettext
assert rf.env.globals['ngettext'] == deform_jinja2.DummyTranslator().ngettext
| 39.761905
| 93
| 0.647505
|
448ba8c2d26b8f46f51a9350a196da9ad2ce2dfb
| 393
|
py
|
Python
|
sites/verdadeiro/comments/comments/wsgi.py
|
JoaoPedro-M/Python
|
4697cdae15cf9b9fb242f210e5a79ea182525ba3
|
[
"MIT"
] | null | null | null |
sites/verdadeiro/comments/comments/wsgi.py
|
JoaoPedro-M/Python
|
4697cdae15cf9b9fb242f210e5a79ea182525ba3
|
[
"MIT"
] | null | null | null |
sites/verdadeiro/comments/comments/wsgi.py
|
JoaoPedro-M/Python
|
4697cdae15cf9b9fb242f210e5a79ea182525ba3
|
[
"MIT"
] | null | null | null |
"""
WSGI config for comments project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'comments.settings')
application = get_wsgi_application()
| 23.117647
| 78
| 0.78626
|
320a41200c4743b1c01f18e2a6163abd548b602c
| 1,455
|
py
|
Python
|
source/mission/server.py
|
nikhilrj/CARDS
|
509815f23c11881e6444308fa1014aed6a1af358
|
[
"MIT"
] | null | null | null |
source/mission/server.py
|
nikhilrj/CARDS
|
509815f23c11881e6444308fa1014aed6a1af358
|
[
"MIT"
] | null | null | null |
source/mission/server.py
|
nikhilrj/CARDS
|
509815f23c11881e6444308fa1014aed6a1af358
|
[
"MIT"
] | null | null | null |
import sys
import os
import socket
import SocketServer
import rsa
import time, select
from control import *
#from variable import *
#global mission
class PiServer():
#def __init__(self, Ip = '127.0.0.1', port = 1337, sz = 2048, keySz = 256):
### Process of generating a public and private key ###
def keyExchange(self):
(pubKey, self.privKey) = rsa.newkeys(bitKeySize)
pubKeyN = pubKey.n
pubKeyE = pubKey.e
pubKeyN = str(pubKeyN)
pubKeyE = str(pubKeyE)
conn.send(pubKeyN)
time.sleep(1)
conn.send(pubKeyE)
print 'Client Public key sent.'
def operation(self, motors=None):
global CFC
CFC.update(PiServer.operation)
        if select.select([conn], [], [], 0)[0]:
            encryptedMessage = conn.recv(size)
            decryptedMessage = rsa.decrypt(encryptedMessage, self.privKey)
            print decryptedMessage.lower()
            if motors is not None:
                motors.drive(25, 25)
            #time.sleep(1)
            return decryptedMessage.lower()
def send(self, msg):
conn.send(str(msg))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return self.__dict__.__str__()
# The socket must be set up and a client accepted before PiServer methods
# that reference the module-level `conn` are called.
testIP = '192.168.0.102'
portListen = 9038
size = 2048
bitKeySize = 256
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((testIP, portListen))
server.listen(1)
conn, client_addr = server.accept()
print 'Connected to Client.'

if __name__ == '__main__':
    server = PiServer()
    server.keyExchange()
    while True:
        print server.operation()
| 20.208333
| 76
| 0.705155
|
07cb13ebbbf7c04235fcf6f4876ae4903c303994
| 1,169
|
py
|
Python
|
packages/python/yap_kernel/yap_ipython/utils/data.py
|
ryandesign/yap
|
9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214
|
[
"Artistic-1.0-Perl",
"ClArtistic"
] | 90
|
2015-03-09T01:24:15.000Z
|
2022-02-24T13:56:25.000Z
|
packages/python/yap_kernel/yap_ipython/utils/data.py
|
ryandesign/yap
|
9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214
|
[
"Artistic-1.0-Perl",
"ClArtistic"
] | 52
|
2016-02-14T08:59:37.000Z
|
2022-03-14T16:39:35.000Z
|
packages/python/yap_kernel/yap_ipython/utils/data.py
|
ryandesign/yap
|
9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214
|
[
"Artistic-1.0-Perl",
"ClArtistic"
] | 27
|
2015-11-19T02:45:49.000Z
|
2021-11-25T19:47:58.000Z
|
# encoding: utf-8
"""Utilities for working with data structures like lists, dicts and tuples.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
but maintaining the order in which they first appear.
Note: All elements in the input must be hashable for this routine
to work, as it internally uses a set for efficiency reasons.
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
def flatten(seq):
"""Flatten a list of lists (NOT recursive, only works for 2d lists)."""
return [x for subseq in seq for x in subseq]
def chop(seq, size):
"""Chop a sequence into chunks of the given size."""
return [seq[i:i+size] for i in range(0,len(seq),size)]
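# Added usage examples:
#   uniq_stable([1, 2, 1, 3, 2])  -> [1, 2, 3]
#   flatten([[1, 2], [3]])        -> [1, 2, 3]
#   chop([1, 2, 3, 4, 5], 2)      -> [[1, 2], [3, 4], [5]]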
| 31.594595
| 78
| 0.596236
|
57e62df19bff48a56dfcf257bdb80ff622ca89fb
| 1,704
|
py
|
Python
|
semestr_01/IAP/optimalizace.py
|
zmatlik117/iap
|
107d0dddf8beccf01a3f02069b704ce5fd9f5df7
|
[
"MIT"
] | null | null | null |
semestr_01/IAP/optimalizace.py
|
zmatlik117/iap
|
107d0dddf8beccf01a3f02069b704ce5fd9f5df7
|
[
"MIT"
] | null | null | null |
semestr_01/IAP/optimalizace.py
|
zmatlik117/iap
|
107d0dddf8beccf01a3f02069b704ce5fd9f5df7
|
[
"MIT"
] | null | null | null |
from scipy import optimize
import matplotlib.pyplot as plt
import numpy as np
def tested_function(x):
"""
    Tested function.
    Any kind of expression can be put in here.
"""
freq = 1
damp_fac = 0.1
val = np.sin(freq * x)
damp = np.exp(-1 * damp_fac * abs(x))
return val * damp
# how many times to repeat the optimization run
no_samples = 10
# how many steps to take in the optimization
niter = 1000
# initial guess for the minimum - deliberately placed way off so the search sometimes misses
original_estimate = 8
samples = []
# number of samples to split the x axis into
num = int(1e4)
x_axis = np.linspace(start=-20, stop=20, num=num)
# just an alias for the function so it is easier to reuse via copy-paste-modify
function = tested_function
# set up plotting
fig, ax = plt.subplots()
ax.plot(x_axis, function(x_axis), 'b')
# mark the initial guess for the minimum
ax.plot(original_estimate, function(original_estimate), marker='x', color='r')
ax.set(xlabel='x', ylabel='myfunc', title='tested function')
ax.grid()
plt.show(block=False)
# run all the computations
for iteration in range(no_samples):
print("iteration %s" % iteration)
    # basin hopping is conceptually quite similar to simulated annealing,
    # which is why simulated annealing in scipy is deprecated
min_basinhop = optimize.basinhopping(func=function, x0=original_estimate, niter=niter)['x']
result = function(min_basinhop)
samples.append((min_basinhop, result))
    # drop the result into the plot as a point
ax.plot(min_basinhop, result, marker='o')
fig.canvas.draw()
plt.show()
# and print the found minima to the console - they do not always hit the global one
for sample in samples:
print(sample[0], sample[1])
| 29.894737
| 96
| 0.723005
|
b97121c89ab1922bb8ae340f382d2753196bff1a
| 5,944
|
py
|
Python
|
sleekxmpp/componentxmpp.py
|
imo/SleekXMPP
|
8175ed572888551314fe43304ab8acd2278c809b
|
[
"BSD-3-Clause"
] | 3
|
2019-02-01T06:50:08.000Z
|
2020-03-24T00:45:31.000Z
|
sleekxmpp/componentxmpp.py
|
imo/SleekXMPP
|
8175ed572888551314fe43304ab8acd2278c809b
|
[
"BSD-3-Clause"
] | null | null | null |
sleekxmpp/componentxmpp.py
|
imo/SleekXMPP
|
8175ed572888551314fe43304ab8acd2278c809b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
    sleekxmpp.componentxmpp
    ~~~~~~~~~~~~~~~~~~~~~~~
This module provides XMPP functionality that
is specific to external server component connections.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
from __future__ import absolute_import
import logging
import sys
import hashlib
from sleekxmpp.basexmpp import BaseXMPP
from sleekxmpp.xmlstream import XMLStream
from sleekxmpp.xmlstream import ET
from sleekxmpp.xmlstream.matcher import MatchXPath
from sleekxmpp.xmlstream.handler import Callback
log = logging.getLogger(__name__)
class ComponentXMPP(BaseXMPP):
"""
SleekXMPP's basic XMPP server component.
Use only for good, not for evil.
:param jid: The JID of the component.
:param secret: The secret or password for the component.
:param host: The server accepting the component.
:param port: The port used to connect to the server.
:param plugin_config: A dictionary of plugin configurations.
:param plugin_whitelist: A list of approved plugins that
will be loaded when calling
:meth:`~sleekxmpp.basexmpp.BaseXMPP.register_plugins()`.
:param use_jc_ns: Indicates if the ``'jabber:client'`` namespace
should be used instead of the standard
``'jabber:component:accept'`` namespace.
Defaults to ``False``.
"""
def __init__(self, jid, secret, host=None, port=None,
plugin_config={}, plugin_whitelist=[], use_jc_ns=False):
if use_jc_ns:
default_ns = 'jabber:client'
else:
default_ns = 'jabber:component:accept'
BaseXMPP.__init__(self, jid, default_ns)
self.auto_authorize = None
self.stream_header = "<stream:stream %s %s to='%s'>" % (
'xmlns="jabber:component:accept"',
'xmlns:stream="%s"' % self.stream_ns,
jid)
self.stream_footer = "</stream:stream>"
self.server_host = host
self.server_port = port
self.secret = secret
self.plugin_config = plugin_config
self.plugin_whitelist = plugin_whitelist
self.is_component = True
self.register_handler(
Callback('Handshake',
MatchXPath('{jabber:component:accept}handshake'),
self._handle_handshake))
self.add_event_handler('presence_probe',
self._handle_probe)
def connect(self, host=None, port=None, use_ssl=False,
use_tls=False, reattempt=True):
"""Connect to the server.
Setting ``reattempt`` to ``True`` will cause connection attempts to
be made every second until a successful connection is established.
:param host: The name of the desired server for the connection.
Defaults to :attr:`server_host`.
:param port: Port to connect to on the server.
                     Defaults to :attr:`server_port`.
:param use_ssl: Flag indicating if SSL should be used by connecting
directly to a port using SSL.
:param use_tls: Flag indicating if TLS should be used, allowing for
connecting to a port without using SSL immediately and
later upgrading the connection.
:param reattempt: Flag indicating if the socket should reconnect
after disconnections.
"""
if host is None:
host = self.server_host
if port is None:
port = self.server_port
self.server_name = self.boundjid.host
if use_tls:
log.info("XEP-0114 components can not use TLS")
log.debug("Connecting to %s:%s", host, port)
return XMLStream.connect(self, host=host, port=port,
use_ssl=use_ssl,
use_tls=False,
reattempt=reattempt)
def incoming_filter(self, xml):
"""
Pre-process incoming XML stanzas by converting any
``'jabber:client'`` namespaced elements to the component's
default namespace.
:param xml: The XML stanza to pre-process.
"""
if xml.tag.startswith('{jabber:client}'):
xml.tag = xml.tag.replace('jabber:client', self.default_ns)
# The incoming_filter call is only made on top level stanza
# elements. So we manually continue filtering on sub-elements.
for sub in xml:
self.incoming_filter(sub)
return xml
def start_stream_handler(self, xml):
"""
Once the streams are established, attempt to handshake
with the server to be accepted as a component.
:param xml: The incoming stream's root element.
"""
BaseXMPP.start_stream_handler(self, xml)
# Construct a hash of the stream ID and the component secret.
sid = xml.get('id', '')
pre_hash = '%s%s' % (sid, self.secret)
if sys.version_info >= (3, 0):
# Handle Unicode byte encoding in Python 3.
pre_hash = bytes(pre_hash, 'utf-8')
handshake = ET.Element('{jabber:component:accept}handshake')
handshake.text = hashlib.sha1(pre_hash).hexdigest().lower()
self.send_xml(handshake, now=True)
def _handle_handshake(self, xml):
"""The handshake has been accepted.
:param xml: The reply handshake stanza.
"""
self.session_bind_event = True
self.session_started_event = True
self.event("session_bind", self.boundjid, direct=True)
self.event("session_start")
def _handle_probe(self, pres):
self.roster[pres['to']][pres['from']].handle_probe(pres)
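# Added usage sketch; the JID, secret, and port are placeholders for a real
# XEP-0114 component registration:
#
#   xmpp = ComponentXMPP('component.example.com', 'secret',
#                        'example.com', 5347)
#   xmpp.register_plugins()
#   if xmpp.connect():
#       xmpp.process(block=True)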
| 35.807229
| 78
| 0.607167
|
21b9e7f81e80f8665f9be602f7efc8f2b9ddaffa
| 972
|
py
|
Python
|
python/phonenumbers/shortdata/region_BG.py
|
vishnuku/python-phonenumbers
|
6ac2cdd06b7ccf709a8efb21629cf2c5f030e627
|
[
"Apache-2.0"
] | 3
|
2018-12-02T23:09:00.000Z
|
2018-12-02T23:16:59.000Z
|
python/phonenumbers/shortdata/region_BG.py
|
carljm/python-phonenumbers
|
494044aaf75443dbfd62b8d1352b441af6a458ae
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/shortdata/region_BG.py
|
carljm/python-phonenumbers
|
494044aaf75443dbfd62b8d1352b441af6a458ae
|
[
"Apache-2.0"
] | null | null | null |
"""Auto-generated file, do not edit by hand. BG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BG = PhoneMetadata(id='BG', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_number_pattern='\\d{3,6}', possible_length=(3, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='116(?:000|111)', possible_number_pattern='\\d{6}', example_number='116000', possible_length=(6,)),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|50|6[06])', possible_number_pattern='\\d{3,6}', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:2|6(?:000|111))|50|6[06])', possible_number_pattern='\\d{3,6}', example_number='112', possible_length=(3, 6)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(),
short_data=True)
| 74.769231
| 174
| 0.751029
|
a305301340b64463104a9994ffa65b7e9bf3db20
| 9,035
|
py
|
Python
|
drex-atari/synthesize_rankings_bc.py
|
abalakrishna123/CoRL2019-DREX
|
b80b3308559261f7144d173ee293eb3c5b4a0502
|
[
"MIT"
] | 36
|
2019-11-02T02:48:47.000Z
|
2022-03-04T19:10:28.000Z
|
drex-atari/synthesize_rankings_bc.py
|
abalakrishna123/CoRL2019-DREX
|
b80b3308559261f7144d173ee293eb3c5b4a0502
|
[
"MIT"
] | 16
|
2019-10-31T16:19:34.000Z
|
2022-03-12T00:02:27.000Z
|
drex-atari/synthesize_rankings_bc.py
|
abalakrishna123/CoRL2019-DREX
|
b80b3308559261f7144d173ee293eb3c5b4a0502
|
[
"MIT"
] | 13
|
2019-11-06T08:32:06.000Z
|
2022-02-28T06:56:10.000Z
|
#from ale_wrapper import ALEInterfaceWrapper
from preprocess import Preprocessor
from state import *
import numpy as np
import utils
import gym
from baselines.ppo2.model import Model
from baselines.common.policies import build_policy
from baselines.common.cmd_util import make_vec_env
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.vec_env.vec_normalize import VecNormalize
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
import torch
import argparse
from train import train
from pdb import set_trace
import dataset
import tensorflow as tf
from bc import Clone
from baselines.common.trex_utils import preprocess
class DemoGenerator:
def __init__(self, agent, env_name, num_eval_episodes, seed):
self.agent = agent
self.env_name = env_name
self.num_eval_episodes = num_eval_episodes
self.seed = seed
if env_name == "spaceinvaders":
env_id = "SpaceInvadersNoFrameskip-v4"
elif env_name == "mspacman":
env_id = "MsPacmanNoFrameskip-v4"
elif env_name == "videopinball":
env_id = "VideoPinballNoFrameskip-v4"
elif env_name == "beamrider":
env_id = "BeamRiderNoFrameskip-v4"
else:
env_id = env_name[0].upper() + env_name[1:] + "NoFrameskip-v4"
env_type = "atari"
#env id, env type, num envs, and seed
env = make_vec_env(env_id, env_type, 1, seed, wrapper_kwargs={'clip_rewards':False,'episode_life':False,})
if env_type == 'atari':
env = VecFrameStack(env, 4)
print("env actions", env.action_space)
self.env = env
def get_pseudo_rankings(self, epsilon_greedy_list, add_noop=False, returns=False):
ranked_batches = []
batch_returns = []
if add_noop:
#generate noop demonstrations by doing nothing for a while
demo_noop = self.generate_noop_demo(self.env)
ranked_batches.append(demo_noop)
for epsilon_greedy in epsilon_greedy_list:
demo_batch, demo_returns = self.generate_demos(self.env, self.agent, epsilon_greedy)
ranked_batches.append(demo_batch)
batch_returns.append(demo_returns)
if returns:
return ranked_batches, batch_returns
else:
return ranked_batches
def get_pseudo_ranking_returns(self, epsilon_greedy_list):
batch_returns = []
for epsilon_greedy in epsilon_greedy_list:
batch = self.generate_returns(self.env, self.agent, epsilon_greedy)
batch_returns.append(batch)
return batch_returns
def generate_noop_demo(self, env):
print("Generating demos for noop agent")
noop_action = 0
rewards = []
# 100 episodes
episode_count = 4
reward = 0
done = False
rewards = []
cum_steps = []
demos = []
#writer = open(self.checkpoint_dir + "/" +self.env_name + "_bc_results.txt", 'w')
for i in range(int(episode_count)):
ob = env.reset()
steps = 0
acc_reward = 0
traj = []
while True:
#preprocess the state
state = preprocess(ob, self.env_name)
traj.append(state)
state = np.transpose(state, (0, 3, 1, 2))
ob, reward, done, _ = env.step(noop_action)
steps += 1
acc_reward += reward
if done or steps > 500:
print("Episode: {}, Steps: {}, Reward: {}".format(i,steps,acc_reward))
#writer.write("{}\n".format(acc_reward[0]))
rewards.append(acc_reward)
cum_steps.append(steps)
break
demos.append(traj)
print("Mean reward is: " + str(np.mean(rewards)))
print("Mean step length is: " + str(np.mean(cum_steps)))
return demos
def generate_demos(self, env, agent, epsilon_greedy):
print("Generating demos for epsilon=",epsilon_greedy)
rewards = []
# 100 episodes
episode_count = self.num_eval_episodes
reward = 0
done = False
rewards = []
cum_steps = []
demos = []
#writer = open(self.checkpoint_dir + "/" +self.env_name + "_bc_results.txt", 'w')
for i in range(int(episode_count)):
ob = env.reset()
steps = 0
acc_reward = 0
traj = []
while True:
#preprocess the state
state = preprocess(ob, self.env_name)
traj.append(state)
state = np.transpose(state, (0, 3, 1, 2))
if np.random.rand() < epsilon_greedy:
#print('eps greedy action')
action = env.action_space.sample()
else:
#print('policy action')
action = agent.get_action(state)
ob, reward, done, _ = env.step(action)
steps += 1
acc_reward += reward
if done:
print("Episode: {}, Steps: {}, Reward: {}".format(i,steps,acc_reward))
#writer.write("{}\n".format(acc_reward[0]))
rewards.append(acc_reward)
cum_steps.append(steps)
break
print("traj length", len(traj))
demos.append(traj)
print("demo len", len(demos))
print("Mean reward is: " + str(np.mean(rewards)))
print("Mean step length is: " + str(np.mean(cum_steps)))
return demos, rewards
def generate_returns(self, env, agent, epsilon_greedy):
print("Generating returns for epsilon=",epsilon_greedy)
rewards = []
# 100 episodes
episode_count = self.num_eval_episodes
reward = 0
done = False
rewards = []
cum_steps = []
#writer = open(self.checkpoint_dir + "/" +self.env_name + "_bc_results.txt", 'w')
for i in range(int(episode_count)):
ob = env.reset()
steps = 0
acc_reward = 0
while True:
#preprocess the state
state = preprocess(ob, self.env_name)
state = np.transpose(state, (0, 3, 1, 2))
if np.random.rand() < epsilon_greedy:
#print('eps greedy action')
action = env.action_space.sample()
else:
#print('policy action')
action = agent.get_action(state)
ob, reward, done, _ = env.step(action)
steps += 1
acc_reward += reward
if done:
print("Episode: {}, Steps: {}, Reward: {}".format(i,steps,acc_reward))
#writer.write("{}\n".format(acc_reward[0]))
rewards.append(acc_reward)
cum_steps.append(steps)
break
print("Mean reward is: " + str(np.mean(rewards)))
print("Mean step length is: " + str(np.mean(cum_steps)))
return rewards
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# ##################################################
# ## Algorithm parameters ##
# ##################################################
#parser.add_argument("--dataset-size", type=int, default=75000)
#parser.add_argument("--updates", type=int, default=10000)#200000)
parser.add_argument("--env_name", type=str, help="Atari environment name in lowercase, i.e. 'beamrider'")
parser.add_argument("--checkpoint_policy", type=str)
parser.add_argument("--num_eval_episodes", type=int, default = 20)
parser.add_argument('--seed', default=0, help="random seed for experiments")
epsilon_greedy_list = [0.01, 0.1, 0.3, 0.5, 1.0]
hist_length = 4
args = parser.parse_args()
seed = int(args.seed)
print("seed", seed)
torch.manual_seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
env_name = args.env_name
if env_name == "spaceinvaders":
env_id = "SpaceInvadersNoFrameskip-v4"
elif env_name == "mspacman":
env_id = "MsPacmanNoFrameskip-v4"
elif env_name == "videopinball":
env_id = "VideoPinballNoFrameskip-v4"
elif env_name == "beamrider":
env_id = "BeamRiderNoFrameskip-v4"
else:
env_id = env_name[0].upper() + env_name[1:] + "NoFrameskip-v4"
env_type = "atari"
#TODO: minimal action set from env
minimal_action_set = [0,1,2,3]
agent = Clone(list(minimal_action_set), hist_length, args.checkpoint_policy)
print("beginning evaluation")
generator = DemoGenerator(agent, env_name, args.num_eval_episodes, seed)
ranked_demos = generator.get_pseudo_rankings(epsilon_greedy_list)
print(len(ranked_demos))
| 36.285141
| 114
| 0.573437
|
12cac85bfbdb017ea644586e6bbd09049ff8f025
| 289
|
py
|
Python
|
core/messaging/__init__.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | 3
|
2017-10-28T11:28:38.000Z
|
2018-09-12T09:47:00.000Z
|
core/messaging/__init__.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
core/messaging/__init__.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
from core.messaging.builder import StringBuilder
from core.messaging.pronouns import His, Him, He
from core.messaging.variables import Targets, Defender, Attacker, Actor, TargetTwo, TargetOne, Target, AttackerWeapon, \
Ammunition, MessageVariable
from core.messaging.verbs import Verb
| 48.166667
| 120
| 0.820069
|
ee35f29c85ce9c4710d3c1eaca57c5f55eac687a
| 2,059
|
py
|
Python
|
openstack/network/v2/security_group.py
|
horion/openstacksdk
|
cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3
|
[
"Apache-2.0"
] | 99
|
2018-03-28T15:41:45.000Z
|
2022-01-23T17:22:13.000Z
|
openstack/network/v2/security_group.py
|
horion/openstacksdk
|
cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3
|
[
"Apache-2.0"
] | 5
|
2018-05-25T16:54:23.000Z
|
2021-11-21T02:27:16.000Z
|
openstack/network/v2/security_group.py
|
horion/openstacksdk
|
cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3
|
[
"Apache-2.0"
] | 104
|
2018-04-06T14:33:54.000Z
|
2022-03-01T01:58:09.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import _base
from openstack import resource
class SecurityGroup(_base.NetworkResource, resource.TagMixin):
resource_key = 'security_group'
resources_key = 'security_groups'
base_path = '/security-groups'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'description', 'fields', 'id', 'name', 'stateful', 'project_id',
'tenant_id', 'revision_number', 'sort_dir', 'sort_key',
**resource.TagMixin._tag_query_parameters
)
# Properties
#: Timestamp when the security group was created.
created_at = resource.Body('created_at')
#: The security group description.
description = resource.Body('description')
#: The security group name.
name = resource.Body('name')
#: Whether the security group is stateful or not.
stateful = resource.Body('stateful')
#: The ID of the project this security group is associated with.
project_id = resource.Body('project_id')
#: A list of
#: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule`
#: objects. *Type: list*
security_group_rules = resource.Body('security_group_rules', type=list)
#: The ID of the project this security group is associated with.
tenant_id = resource.Body('tenant_id')
#: Timestamp when the security group was last updated.
updated_at = resource.Body('updated_at')
| 38.12963
| 75
| 0.717339
|
0c2576361d9c166d4e3249a718b0c51f963832f6
| 9,071
|
py
|
Python
|
env/lib/python3.6/site-packages/wheel/pep425tags.py
|
amogh-gulati/corona_dashboard
|
ce1a20ad56bdfb758d41513b4706fe3a47764c32
|
[
"MIT"
] | 91
|
2019-01-28T13:32:14.000Z
|
2021-12-08T03:23:56.000Z
|
env/lib/python3.6/site-packages/wheel/pep425tags.py
|
amogh-gulati/corona_dashboard
|
ce1a20ad56bdfb758d41513b4706fe3a47764c32
|
[
"MIT"
] | 64
|
2021-06-03T13:24:12.000Z
|
2022-03-31T19:11:41.000Z
|
env/lib/python3.6/site-packages/wheel/pep425tags.py
|
amogh-gulati/corona_dashboard
|
ce1a20ad56bdfb758d41513b4706fe3a47764c32
|
[
"MIT"
] | 37
|
2020-07-09T23:12:30.000Z
|
2022-03-16T11:15:58.000Z
|
"""Generate and work with PEP 425 Compatibility Tags."""
import distutils.util
import platform
import sys
import os
import sysconfig
import warnings
from .macosx_libfile import extract_macosx_min_system_version
try:
from importlib.machinery import all_suffixes as get_all_suffixes
except ImportError:
from imp import get_suffixes
def get_all_suffixes():
return [suffix[0] for suffix in get_suffixes()]
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
impl = platform.python_implementation()
if impl == 'PyPy':
return 'pp'
elif impl == 'Jython':
return 'jy'
elif impl == 'IronPython':
return 'ip'
elif impl == 'CPython':
return 'cp'
raise LookupError('Unknown Python implementation: ' + impl)
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver:
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp' and
sys.version_info < (3, 8))) \
and sys.version_info < (3, 8):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
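# For example, CPython 3.6 with SOABI 'cpython-36m-x86_64-linux-gnu' yields
# 'cp36m', while a typical Linux CPython 2.7 build (no SOABI config var)
# falls back to the flag probes above and yields 'cp27mu'.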
def calculate_macosx_platform_tag(archive_root, platform_tag):
"""
    Calculate the proper macosx platform tag based on the files included in the wheel.
    Example platform tag: `macosx-10.14-x86_64`
"""
prefix, base_version, suffix = platform_tag.split('-')
base_version = tuple([int(x) for x in base_version.split(".")])
if len(base_version) >= 2:
base_version = base_version[0:2]
assert len(base_version) == 2
if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
deploy_target = tuple([int(x) for x in os.environ[
"MACOSX_DEPLOYMENT_TARGET"].split(".")])
if len(deploy_target) >= 2:
deploy_target = deploy_target[0:2]
if deploy_target < base_version:
sys.stderr.write(
"[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than the "
"version on which the Python interpreter was compiled ({}), and will be "
"ignored.\n".format('.'.join(str(x) for x in deploy_target),
'.'.join(str(x) for x in base_version))
)
else:
base_version = deploy_target
assert len(base_version) == 2
start_version = base_version
versions_dict = {}
for (dirpath, dirnames, filenames) in os.walk(archive_root):
for filename in filenames:
if filename.endswith('.dylib') or filename.endswith('.so'):
lib_path = os.path.join(dirpath, filename)
min_ver = extract_macosx_min_system_version(lib_path)
if min_ver is not None:
versions_dict[lib_path] = min_ver[0:2]
if len(versions_dict) > 0:
base_version = max(base_version, max(versions_dict.values()))
    # the macosx platform tag does not support minor bugfix releases
fin_base_version = "_".join([str(x) for x in base_version])
    if start_version < base_version:
        problematic_files = [k for k, v in versions_dict.items()
                             if v > start_version]
        # Count the offending files before flattening the list into a string;
        # checking len() after "\n".join() would measure the joined string,
        # not the number of files.
        if len(problematic_files) == 1:
            files_form = "this file"
        else:
            files_form = "these files"
        problematic_files = "\n".join(problematic_files)
        error_message = \
            "[WARNING] This wheel needs a higher macOS version than {} " \
            "To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least " + \
            fin_base_version + " or recreate " + files_form + " with a lower " \
            "MACOSX_DEPLOYMENT_TARGET: \n" + problematic_files
if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
error_message = error_message.format("is set in MACOSX_DEPLOYMENT_TARGET variable.")
else:
error_message = error_message.format(
"the version your Python interpreter is compiled against.")
sys.stderr.write(error_message)
platform_tag = prefix + "_" + fin_base_version + "_" + suffix
return platform_tag
def get_platform(archive_root):
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
result = distutils.util.get_platform()
if result.startswith("macosx") and archive_root is not None:
result = calculate_macosx_platform_tag(archive_root, result)
result = result.replace('.', '_').replace('-', '_')
if result == "linux_x86_64" and sys.maxsize == 2147483647:
# pip pull request #3497
result = "linux_i686"
return result
def get_supported(archive_root, versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
    # Versions must be listed in order of preference.
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
for suffix in get_all_suffixes():
if suffix.startswith('.abi'):
abi3s.add(suffix.split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform(archive_root))
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in ('31', '30'):
break
for abi in abi3s: # empty set if not Python 3
for arch in platforms:
supported.append(("%s%s" % (impl, version), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# Major Python version + platform; e.g. binaries not using the Python API
for arch in platforms:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
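# A small self-check sketch (not part of the original module): running this
# file directly prints the current interpreter's tag components and its five
# most-preferred tags; archive_root=None skips the macOS re-tagging path.
if __name__ == '__main__':
    print('implementation:', get_abbr_impl() + get_impl_ver())
    print('abi:', get_abi_tag())
    print('platform:', get_platform(None))
    for tag in get_supported(None)[:5]:
        print('%s-%s-%s' % tag)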
avg_line_length: 34.622137 | max_line_length: 96 | alphanum_fraction: 0.60258

hexsha: 74fed75c774212a9771295792e865d94cd7ee164 | size: 1,894 | ext: py | lang: Python
path: novaclient/v1_1/cloudpipe.py
repo: bclau/python-novaclient @ 7d1cd188e0da0e14b58ead3e46c2f1461620c03c | licenses: ["Apache-1.1"]
stars: 1 (2015-02-16T09:37:00.000Z to 2015-02-16T09:37:00.000Z)
issues: null
forks: null
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cloudpipe interface."""
from novaclient import base
class Cloudpipe(base.Resource):
"""A cloudpipe instance is a VPN attached to a proejct's VLAN."""
def __repr__(self):
return "<Cloudpipe: %s>" % self.project_id
def delete(self):
self.manager.delete(self)
class CloudpipeManager(base.ManagerWithFind):
resource_class = Cloudpipe
def create(self, project):
"""
Launch a cloudpipe instance.
:param project: UUID of the project (tenant) for the cloudpipe
"""
body = {'cloudpipe': {'project_id': project}}
return self._create('/os-cloudpipe', body, 'instance_id',
return_raw=True)
def list(self):
"""
Get a list of cloudpipe instances.
"""
return self._list('/os-cloudpipe', 'cloudpipes')
def update(self, address, port):
"""
Update VPN address and port for all networks associated
with the project defined by authentication
:param address: IP address
:param port: Port number
"""
body = {'configure_project': {'vpn_ip': address,
'vpn_port': port}}
self._update("/os-cloudpipe/configure-project", body)
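# A minimal usage sketch (not from the original module), assuming the v1.1
# Client exposes this manager as `nova.cloudpipe`; the credentials, project
# UUID and VPN endpoint below are placeholders.
if __name__ == '__main__':
    from novaclient.v1_1 import client
    nova = client.Client('user', 'password', 'project',
                         'http://keystone.example.com:5000/v2.0/')
    nova.cloudpipe.create('11111111-2222-3333-4444-555555555555')
    for vpn in nova.cloudpipe.list():
        print(vpn)
    nova.cloudpipe.update('10.0.0.2', 1194)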
avg_line_length: 30.548387 | max_line_length: 78 | alphanum_fraction: 0.640444

hexsha: 3188c2acdebc5d6e43cd11cb7ee1fddddaa14481 | size: 823 | ext: py | lang: Python
path: python/pygtk/python_gtk3_pygobject/label_html.py
repo: jeremiedecock/snippets @ 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | licenses: ["MIT"]
stars: 23 (2015-06-08T13:01:00.000Z to 2021-12-30T08:20:04.000Z)
issues: 1 (2020-10-22T02:36:10.000Z to 2020-10-22T02:36:10.000Z)
forks: 7 (2017-10-31T09:48:14.000Z to 2022-01-04T15:59:45.000Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
A minimal Python GTK+3 snippet: a label rendered with Pango markup.
See: http://python-gtk-3-tutorial.readthedocs.org/en/latest/label.html
"""
from gi.repository import Gtk as gtk
def main():
window = gtk.Window()
label = gtk.Label()
label.set_markup('Text can be <small>small</small>, <big>big</big>, <b>bold</b>, <i>italic</i> and even point to somewhere <a href="http://www.jdhp.org">www.jdhp.org</a>.')
window.add(label)
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
avg_line_length: 29.392857 | max_line_length: 176 | alphanum_fraction: 0.616039

hexsha: 448e45d84dc3a56a968077253787a405bf1a80dc | size: 16,751 | ext: py | lang: Python
path: pyvcloud/vcd/utils.py
repo: pacogomez/pyvcloud @ 731aded20b999d269472caf65df774c284dd49b6 | licenses: ["Apache-2.0"]
stars: null
issues: 1 (2017-12-28T13:50:54.000Z to 2017-12-28T17:28:15.000Z)
forks: 1 (2017-12-28T10:22:55.000Z to 2017-12-28T10:22:55.000Z)
# VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import humanfriendly
from lxml import etree
from lxml.objectify import NoneElement
from pygments import formatters
from pygments import highlight
from pygments import lexers
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import get_links
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import VCLOUD_STATUS_MAP
def extract_id(urn):
if urn is None:
return None
if ':' in urn:
return urn.split(':')[-1]
else:
return urn
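# e.g. extract_id('urn:vcloud:org:a93c9db9') == 'a93c9db9'  (value illustrative)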
def org_to_dict(org):
result = {}
result['name'] = org.get('name')
result['id'] = extract_id(org.get('id'))
result['full_name'] = str('%s' % org.FullName)
result['description'] = str('%s' % org.Description)
result['vdcs'] = [
str(n.name) for n in get_links(org, media_type=EntityType.VDC.value)
]
result['org_networks'] = [
str(n.name)
for n in get_links(org, media_type=EntityType.ORG_NETWORK.value)
]
result['catalogs'] = [
str(n.name)
for n in get_links(org, media_type=EntityType.CATALOG.value)
]
return result
def vdc_to_dict(vdc, access_control_settings=None):
result = {}
result['name'] = vdc.get('name')
result['id'] = extract_id(vdc.get('id'))
if hasattr(vdc, 'IsEnabled'):
result['is_enabled'] = bool(vdc.IsEnabled)
if hasattr(vdc, 'AvailableNetworks') and \
hasattr(vdc.AvailableNetworks, 'Network'):
result['networks'] = []
for n in vdc.AvailableNetworks.Network:
result['networks'].append(n.get('name'))
if hasattr(vdc, 'ComputeCapacity'):
result['cpu_capacity'] = {
'units': str(vdc.ComputeCapacity.Cpu.Units),
'allocated': str(vdc.ComputeCapacity.Cpu.Allocated),
'limit': str(vdc.ComputeCapacity.Cpu.Limit),
'reserved': str(vdc.ComputeCapacity.Cpu.Reserved),
'used': str(vdc.ComputeCapacity.Cpu.Used),
'overhead': str(vdc.ComputeCapacity.Cpu.Overhead)
}
result['mem_capacity'] = {
'units': str(vdc.ComputeCapacity.Memory.Units),
'allocated': str(vdc.ComputeCapacity.Memory.Allocated),
'limit': str(vdc.ComputeCapacity.Memory.Limit),
'reserved': str(vdc.ComputeCapacity.Memory.Reserved),
'used': str(vdc.ComputeCapacity.Memory.Used),
'overhead': str(vdc.ComputeCapacity.Memory.Overhead)
}
if hasattr(vdc, 'AllocationModel'):
result['allocation_model'] = str(vdc.AllocationModel)
if hasattr(vdc, 'VmQuota'):
result['vm_quota'] = int(vdc.VmQuota)
if hasattr(vdc, 'Capabilities') and \
hasattr(vdc.Capabilities, 'SupportedHardwareVersions') and \
hasattr(vdc.Capabilities.SupportedHardwareVersions,
'SupportedHardwareVersion'):
result['supported_hw'] = []
for n in vdc.Capabilities.SupportedHardwareVersions. \
SupportedHardwareVersion:
result['supported_hw'].append(str(n))
if hasattr(vdc, 'ResourceEntities') and \
hasattr(vdc.ResourceEntities, 'ResourceEntity'):
result['vapps'] = []
result['vapp_templates'] = []
for n in vdc.ResourceEntities.ResourceEntity:
if n.get('type') == EntityType.VAPP.value:
result['vapps'].append(n.get('name'))
elif n.get('type') == EntityType.VAPP_TEMPLATE.value:
result['vapp_templates'].append(n.get('name'))
if hasattr(vdc, 'VdcStorageProfiles') and \
hasattr(vdc.VdcStorageProfiles, 'VdcStorageProfile'):
result['storage_profiles'] = []
for sp in vdc.VdcStorageProfiles.VdcStorageProfile:
result['storage_profiles'].append(sp.get('name'))
if access_control_settings is not None:
result.update(access_control_settings)
return result
def to_human(seconds):
    # Use integer division so the breakdown is exact under Python 3;
    # with true division, days and hours would always come out as 0.0.
    weeks = seconds // (7 * 24 * 60 * 60)
    days = seconds // (24 * 60 * 60) - 7 * weeks
    hours = seconds // (60 * 60) - 7 * 24 * weeks - 24 * days
    return '%sw, %sd, %sh' % (weeks, days, hours)
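# e.g. to_human(900000) == '1w, 3d, 10h'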
def vapp_to_dict(vapp, metadata=None, access_control_settings=None):
result = {}
result['name'] = vapp.get('name')
result['id'] = extract_id(vapp.get('id'))
if 'ownerName' in vapp:
result['owner'] = [vapp.get('ownerName')]
if hasattr(vapp, 'Owner') and hasattr(vapp.Owner, 'User'):
result['owner'] = []
for user in vapp.Owner.User:
result['owner'].append(user.get('name'))
items = vapp.xpath('//ovf:NetworkSection/ovf:Network', namespaces=NSMAP)
n = 0
for item in items:
n += 1
network_name = item.get('{http://schemas.dmtf.org/ovf/envelope/1}name')
result['vapp-net-%s' % n] = network_name
if hasattr(vapp, 'NetworkConfigSection'):
for nc in vapp.NetworkConfigSection.NetworkConfig:
if nc.get('networkName') == network_name:
result['vapp-net-%s-mode' % n] = \
nc.Configuration.FenceMode.text
if hasattr(vapp, 'LeaseSettingsSection'):
if hasattr(vapp.LeaseSettingsSection, 'DeploymentLeaseInSeconds'):
result['deployment_lease'] = to_human(
int(vapp.LeaseSettingsSection.DeploymentLeaseInSeconds))
if hasattr(vapp.LeaseSettingsSection, 'StorageLeaseInSeconds'):
result['storage_lease'] = to_human(
int(vapp.LeaseSettingsSection.StorageLeaseInSeconds))
if hasattr(vapp.LeaseSettingsSection, 'DeploymentLeaseExpiration'):
result['deployment_lease_expiration'] = \
vapp.LeaseSettingsSection.DeploymentLeaseExpiration
if hasattr(vapp, 'Children') and hasattr(vapp.Children, 'Vm'):
n = 0
for vm in vapp.Children.Vm:
n += 1
k = 'vm-%s' % n
result[k + ': name'] = vm.get('name')
items = vm.xpath(
'ovf:VirtualHardwareSection/ovf:Item', namespaces=NSMAP)
for item in items:
element_name = item.find('rasd:ElementName', NSMAP)
connection = item.find('rasd:Connection', NSMAP)
if connection is None:
quantity = item.find('rasd:VirtualQuantity', NSMAP)
if quantity is None or isinstance(quantity, NoneElement):
value = item.find('rasd:Description', NSMAP)
else:
units = item.find('rasd:VirtualQuantityUnits', NSMAP)
if isinstance(units, NoneElement):
units = ''
value = '{:,} {}'.format(int(quantity), units).strip()
else:
value = '{}: {}'.format(
connection.get(
'{' + NSMAP['vcloud'] + '}ipAddressingMode'),
connection.get('{' + NSMAP['vcloud'] + '}ipAddress'))
result['%s: %s' % (k, element_name)] = value
env = vm.xpath('ovfenv:Environment', namespaces=NSMAP)
if len(env) > 0:
result['%s: %s' %
(k,
'moid')] = env[0].get('{' + NSMAP['ve'] + '}vCenterId')
if hasattr(vm, 'StorageProfile'):
result['%s: %s' % (k, 'storage-profile')] = \
vm.StorageProfile.get('name')
if hasattr(vm, 'GuestCustomizationSection'):
if hasattr(vm.GuestCustomizationSection, 'AdminPassword'):
element_name = 'password'
value = vm.GuestCustomizationSection.AdminPassword
result['%s: %s' % (k, element_name)] = value
if hasattr(vm.GuestCustomizationSection, 'ComputerName'):
element_name = 'computer-name'
value = vm.GuestCustomizationSection.ComputerName
result['%s: %s' % (k, element_name)] = value
if hasattr(vm, 'NetworkConnectionSection'):
ncs = vm.NetworkConnectionSection
if 'PrimaryNetworkConnectionIndex' in ncs:
result['%s: %s' % (k, 'primary-net')] = \
ncs.PrimaryNetworkConnectionIndex.text
if 'NetworkConnection' in ncs:
for nc in ncs.NetworkConnection:
nci = nc.NetworkConnectionIndex.text
result['%s: net-%s' % (k, nci)] = nc.get('network')
result['%s: net-%s-mode' % (k, nci)] = \
nc.IpAddressAllocationMode.text
result['%s: net-%s-connected' % (k, nci)] = \
nc.IsConnected.text
if hasattr(nc, 'MACAddress'):
result['%s: net-%s-mac' % (k, nci)] = \
nc.MACAddress.text
if hasattr(nc, 'IpAddress'):
result['%s: net-%s-ip' % (k,
nci)] = nc.IpAddress.text
if 'VmSpecSection' in vm:
for setting in vm.VmSpecSection.DiskSection.DiskSettings:
if hasattr(setting, 'Disk'):
result['%s: attached-disk-%s-name' %
(k, setting.DiskId.text)] = \
'%s' % (setting.Disk.get('name'))
result['%s: attached-disk-%s-size-Mb' %
(k, setting.DiskId.text)] = \
'%s' % (setting.SizeMb.text)
result['%s: attached-disk-%s-bus' %
(k, setting.DiskId.text)] = \
'%s' % (setting.BusNumber.text)
result['%s: attached-disk-%s-unit' %
(k, setting.DiskId.text)] = \
'%s' % (setting.UnitNumber.text)
result['status'] = VCLOUD_STATUS_MAP.get(int(vapp.get('status')))
if access_control_settings is not None:
result.update(access_control_settings)
if metadata is not None and hasattr(metadata, 'MetadataEntry'):
for me in metadata.MetadataEntry:
result['metadata: %s' % me.Key.text] = me.TypedValue.Value.text
return result
def task_to_dict(task):
result = to_dict(task)
if hasattr(task, 'Owner'):
result['owner_name'] = task.Owner.get('name')
result['owner_href'] = task.Owner.get('href')
result['owner_type'] = task.Owner.get('type')
if hasattr(task, 'User'):
result['user'] = task.User.get('name')
if hasattr(task, 'Organization'):
result['organization'] = task.Organization.get('name')
if hasattr(task, 'Details'):
result['details'] = task.Details
return result
def disk_to_dict(disk):
result = {}
result['name'] = disk.get('name')
result['id'] = extract_id(disk.get('id'))
result['status'] = disk.get('status')
result['size'] = humanfriendly.format_size(int(disk.get('size')))
result['size_bytes'] = disk.get('size')
result['busType'] = disk.get('busType')
result['busSubType'] = disk.get('busSubType')
result['iops'] = disk.get('iops')
if hasattr(disk, 'Owner'):
result['owner'] = disk.Owner.User.get('name')
if hasattr(disk, 'Description'):
result['description'] = disk.Description
if hasattr(disk, 'StorageProfile'):
result['storageProfile'] = disk.StorageProfile.get('name')
if hasattr(disk, 'attached_vms') and \
hasattr(disk.attached_vms, 'VmReference'):
result['vms_attached'] = disk.attached_vms.VmReference.get('name')
result['vms_attached_id'] = disk.attached_vms.VmReference.get(
'href').split('/vm-')[-1]
return result
def access_control_settings_to_dict(access_control_settings):
"""Convert access control settings to dict.
:param access_control_settings: (ControlAccessParamsType): xml object
representing access control settings.
:return: (dict): dict representation of access control settings.
"""
result = {}
if hasattr(access_control_settings, 'IsSharedToEveryone'):
result['is_shared_to_everyone'] = access_control_settings[
'IsSharedToEveryone']
if hasattr(access_control_settings, 'EveryoneAccessLevel'):
result['everyone_access_level'] = access_control_settings[
'EveryoneAccessLevel']
if hasattr(access_control_settings, 'AccessSettings') and \
hasattr(access_control_settings.AccessSettings,
'AccessSetting') and \
len(access_control_settings.AccessSettings.AccessSetting) > 0:
n = 1
for access_setting in list(
access_control_settings.AccessSettings.AccessSetting):
access_str = 'access_settings'
if hasattr(access_setting, 'Subject'):
result['%s_%s_subject_name' % (access_str, n)] = \
access_setting.Subject.get('name')
if hasattr(access_setting, 'Subject'):
result['%s_%s_subject_href' % (access_str, n)] = \
access_setting.Subject.get('href')
if hasattr(access_setting, 'Subject'):
result['%s_%s_subject_type' % (access_str, n)] = \
access_setting.Subject.get('type')
if hasattr(access_setting, 'AccessLevel'):
result['%s_%s_access_level' % (access_str, n)] = \
access_setting.AccessLevel
n += 1
return result
def filter_attributes(resource_type):
attributes = None
if resource_type in ['adminTask', 'task']:
attributes = ['id', 'name', 'objectName', 'status', 'startDate']
elif resource_type in ['adminVApp', 'vApp']:
attributes = [
'id', 'name', 'numberOfVMs', 'status', 'numberOfCpus',
'memoryAllocationMB', 'storageKB', 'ownerName', 'isDeployed',
'isEnabled', 'vdcName'
]
elif resource_type in ['adminCatalogItem', 'catalogItem']:
attributes = [
'id', 'name', 'catalogName', 'storageKB', 'status', 'entityType',
'vdcName', 'isPublished', 'ownerName'
]
return attributes
def to_dict(obj, attributes=None, resource_type=None,
            exclude=('href', 'type')):
if obj is None:
return {}
result = {}
attributes_res = filter_attributes(resource_type)
if attributes:
for attr in attributes:
result[attr] = None
if attributes_res:
for attr in attributes_res:
result[attr] = None
for attr in obj.attrib:
flag = False
if attributes:
flag = attr in attributes
elif attributes_res:
flag = attr in attributes_res
else:
flag = True
if flag:
if attr == 'id':
result[attr] = extract_id(obj.get(attr))
else:
result[attr] = obj.get(attr)
if hasattr(obj, '__dict__'):
for key in obj.__dict__:
result[key] = obj[key].text
for e in exclude:
        if e in result:
result.pop(e)
return result
def to_camel_case(name, names):
result = name
for n in names:
if name.lower() == n.lower():
return n
return result
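# e.g. to_camel_case('vapp', ['vApp', 'task']) == 'vApp'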
def stdout_xml(the_xml, is_colorized=True):
message = str(etree.tostring(the_xml, pretty_print=True), 'utf-8')
if is_colorized:
print(
highlight(message, lexers.XmlLexer(),
formatters.TerminalFormatter()))
else:
print(message)
def get_admin_href(href):
return href.replace('/api/', '/api/admin/')
def get_admin_extension_href(href):
if '/api/admin/' in href:
return href.replace('/api/admin/', '/api/admin/extension/')
else:
return href.replace('/api/', '/api/admin/extension/')
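# Quick illustration of the href helpers above (the href is made up):
#   get_admin_href('https://vcd.example.com/api/org/1234')
#     -> 'https://vcd.example.com/api/admin/org/1234'
#   get_admin_extension_href('https://vcd.example.com/api/org/1234')
#     -> 'https://vcd.example.com/api/admin/extension/org/1234'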
avg_line_length: 42.08794 | max_line_length: 79 | alphanum_fraction: 0.57304

hexsha: 980235cc84ba3dd2f329f563165fa3df745655b0 | size: 86,968 | ext: py | lang: Python
path: Pico_Buzzer_Music_demo2_main.py
repo: geeekpi/rpico @ b01151fc3dfdd57a9997a2f35662c21d8882eff2 | licenses: ["MIT"]
stars: 4 (2021-07-12T18:27:42.000Z to 2022-01-14T21:15:11.000Z)
issues: null
forks: null
from buzzer_music import music
from time import sleep
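# The playback scaffolding below is a reconstruction sketch, not part of the
# original (truncated) file: it assumes the buzzer_music API used by the
# geeekpi demos, i.e. music(song, pins=[machine.Pin(...)]) driven by tick()
# roughly every 40 ms. GP0 as the buzzer pin and the placeholder melody are
# illustrative; any of the commented example songs below can be assigned to
# `song` instead.
from machine import Pin

song = '0 C5 2 0;2 E5 2 0;4 G5 2 0;6 C6 4 0'  # tiny placeholder melody

mySong = music(song, pins=[Pin(0)])
while True:
    mySong.tick()
    sleep(0.04)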
# Example songs
# https://onlinesequencer.net/195547
#song = '0 A#4 1 1;2 F5 1 1;4 D#5 1 1;8 D5 1 1;11 D5 1 1;6 A#4 1 1;14 D#5 1 1;18 A#4 1 1;20 D#5 1 1;22 A#4 1 1;24 D5 1 1;27 D5 1 1;30 D#5 1 1;32 A#4 1 1;34 F5 1 1;36 D#5 1 1;38 A#4 1 1;40 D5 1 1;43 D5 1 1;46 D#5 1 1;50 A#4 1 1;52 D#5 1 1;54 G5 1 1;56 F5 1 1;59 D#5 1 1;62 F5 1 1;64 A#4 1 1;66 F5 1 1;68 D#5 1 1;70 A#4 1 1;72 D5 1 1;75 D5 1 1;78 D#5 1 1;82 A#4 1 1;84 D#5 1 1;86 A#4 1 1;88 D5 1 1;91 D5 1 1;94 D#5 1 1;96 A#4 1 1;100 D#5 1 1;102 A#4 1 1;104 D5 1 1;107 D5 1 1;110 D#5 1 1;114 A#4 1 1;116 D#5 1 1;118 G5 1 1;120 F5 1 1;123 D#5 1 1;126 F5 1 1;98 F5 1 1'
# https://onlinesequencer.net/1864273
#song = '0 D5 4 14;4 A5 4 14;8 C6 4 14;12 B5 4 14;16 G5 2 14;18 F5 2 14;20 E5 2 14;22 F5 2 14;24 G5 8 14;4 E5 8 16;4 C5 8 16;4 F4 8 16;12 D5 8 16;12 B4 8 16;12 E4 8 16;20 C5 8 16;20 A4 8 16;20 D4 8 16;0 E4 4 16;0 B4 4 16;28 E4 4 16;28 B4 4 16'
# https://onlinesequencer.net/1864297 - Tetris
#song = '0 E3 1 0;2 E4 1 0;4 E3 1 0;6 E4 1 0;8 E3 1 0;10 E4 1 0;12 E3 1 0;14 E4 1 0;16 A3 1 0;18 A4 1 0;20 A3 1 0;22 A4 1 0;24 A3 1 0;26 A4 1 0;28 A3 1 0;30 A4 1 0;32 G#3 1 0;34 G#4 1 0;36 G#3 1 0;38 G#4 1 0;40 E3 1 0;42 E4 1 0;44 E3 1 0;46 E4 1 0;48 A3 1 0;50 A4 1 0;52 A3 1 0;54 A4 1 0;56 A3 1 0;58 B3 1 0;60 C4 1 0;62 D4 1 0;64 D3 1 0;66 D4 1 0;68 D3 1 0;70 D4 1 0;72 D3 1 0;74 D4 1 0;76 D3 1 0;78 D4 1 0;80 C3 1 0;82 C4 1 0;84 C3 1 0;86 C4 1 0;88 C3 1 0;90 C4 1 0;92 C3 1 0;94 C4 1 0;96 G2 1 0;98 G3 1 0;100 G2 1 0;102 G3 1 0;104 E3 1 0;106 E4 1 0;108 E3 1 0;110 E4 1 0;114 A4 1 0;112 A3 1 0;116 A3 1 0;118 A4 1 0;120 A3 1 0;122 A4 1 0;124 A3 1 0;0 E6 1 1;4 B5 1 1;6 C6 1 1;8 D6 1 1;10 E6 1 1;11 D6 1 1;12 C6 1 1;14 B5 1 1;0 E5 1 6;4 B4 1 6;6 C5 1 6;8 D5 1 6;10 E5 1 6;11 D5 1 6;12 C5 1 6;14 B4 1 6;16 A5 1 1;20 A5 1 1;22 C6 1 1;24 E6 1 1;28 D6 1 1;30 C6 1 1;32 B5 1 1;36 B5 1 1;36 B5 1 1;37 B5 1 1;38 C6 1 1;40 D6 1 1;44 E6 1 1;48 C6 1 1;52 A5 1 1;56 A5 1 1;20 A4 1 6;16 A4 1 6;22 C5 1 6;24 E5 1 6;28 D5 1 6;30 C5 1 6;32 B4 1 6;36 B4 1 6;37 B4 1 6;38 C5 1 6;40 D5 1 6;44 E5 1 6;48 C5 1 6;52 A4 1 6;56 A4 1 6;64 D5 1 6;64 D6 1 1;68 D6 1 1;70 F6 1 1;72 A6 1 1;76 G6 1 1;78 F6 1 1;80 E6 1 1;84 E6 1 1;86 C6 1 1;88 E6 1 1;92 D6 1 1;94 C6 1 1;96 B5 1 1;100 B5 1 1;101 B5 1 1;102 C6 1 1;104 D6 1 1;108 E6 1 1;112 C6 1 1;116 A5 1 1;120 A5 1 1;72 A5 1 6;80 E5 1 6;68 D5 1 7;70 F5 1 7;76 G5 1 7;84 E5 1 7;78 F5 1 7;86 C5 1 7;88 E5 1 6;96 B4 1 6;104 D5 1 6;112 C5 1 6;120 A4 1 6;92 D5 1 7;94 C5 1 7;100 B4 1 7;101 B4 1 7;102 C5 1 7;108 E5 1 7;116 A4 1 7'
# https://onlinesequencer.net/1210132
#song = '4 C#5 1 0;8 D#5 1 0;12 C5 1 0;16 C#5 1 0;18 C5 1 0;20 A#4 1 0;22 C5 1 0;24 G#4 1 0;30 G#4 1 0;31 A4 1 0;32 A#4 1 0;36 C#5 1 0;40 A#4 1 0;42 A#5 1 0;44 G#5 1 0;46 F#5 1 0;48 F5 1 0;50 F#5 1 0;52 G#5 1 0;54 F5 1 0;56 D#5 1 0;0 F5 1 0;68 C#5 1 0;72 D#5 1 0;76 C5 1 0;80 C#5 1 0;82 C5 1 0;84 A#4 1 0;86 C5 1 0;88 G#4 1 0;94 G#4 1 0;95 A4 1 0;96 A#4 1 0;100 C#5 1 0;104 A#4 1 0;106 A#5 1 0;108 G#5 1 0;110 F#5 1 0;64 F5 1 0;112 G#5 1 0;114 A#5 1 0;116 C6 1 0;118 C#6 1 0;120 D#6 1 0;128 C#6 1 0;134 F6 1 0;140 C#6 1 0;144 C6 1 0;150 D#6 1 0;156 C6 1 0;158 G#5 1 0;159 A5 1 0;160 A#5 1 0;166 C#6 1 0;172 A#5 1 0;176 C6 1 0;180 D#6 1 0;182 G#5 1 0;184 A#5 1 0;186 B5 1 0;188 C6 1 0;192 C#6 1 0;198 F6 1 0;204 C#6 1 0;208 C6 1 0;214 D#6 1 0;220 C6 1 0;222 G#5 1 0;223 A5 1 0;224 A#5 1 0;230 C#6 1 0;236 A#5 1 0;240 C6 1 0;244 D#6 1 0;246 F6 1 0;248 D#6 1 0;250 C#6 1 0;252 C6 1 0;254 G#5 1 0'
# https://onlinesequencer.net/1327293
#song = '2 A5 1 4;1 B4 1 4;3 A5 1 4;4 G5 1 4;5 G5 1 4;10 F#5 1 4;7 F#5 1 4;8 A4 1 4;9 A4 1 4;11 A4 1 4;13 C#6 1 4;14 B5 1 4;12 C#6 1 4;6 F#5 1 4;15 B4 2 4;18 A5 1 4;17 B4 1 4;19 A5 1 4;20 G5 1 4;21 G5 1 4;26 F#5 1 4;23 F#5 1 4;24 A4 1 4;25 A4 1 4;27 A4 1 4;29 C#6 1 4;30 B5 1 4;28 C#6 1 4;22 F#5 1 4;31 B4 2 4;34 A5 1 4;33 B4 1 4;35 A5 1 4;36 G5 1 4;37 G5 1 4;42 F#5 1 4;39 F#5 1 4;40 A4 1 4;41 A4 1 4;43 A4 1 4;45 C#6 1 4;46 B5 1 4;44 C#6 1 4;38 F#5 1 4;47 B4 2 4;50 A5 1 4;49 B4 1 4;51 A5 1 4;52 G5 1 4;53 G5 1 4;58 F#5 1 4;55 F#5 1 4;56 A4 1 4;57 A4 1 4;59 A4 1 4;61 C#6 1 4;62 B5 1 4;60 C#6 1 4;54 F#5 1 4;63 B4 2 4;98 A5 1 4;97 B4 1 4;99 A5 1 4;100 G5 1 4;101 G5 1 4;106 F#5 1 4;103 F#5 1 4;104 A4 1 4;105 A4 1 4;107 A4 1 4;109 C#6 1 4;110 B5 1 4;108 C#6 1 4;102 F#5 1 4;111 B4 2 4;114 A5 1 4;113 B4 1 4;115 A5 1 4;116 G5 1 4;117 G5 1 4;122 F#5 1 4;119 F#5 1 4;120 A4 1 4;121 A4 1 4;123 A4 1 4;125 C#6 1 4;126 B5 1 4;124 C#6 1 4;118 F#5 1 4;127 B4 2 4;66 A5 1 4;65 B4 1 4;67 A5 1 4;68 G5 1 4;69 G5 1 4;74 F#5 1 4;71 F#5 1 4;72 A4 1 4;73 A4 1 4;75 A4 1 4;77 C#6 1 4;78 B5 1 4;76 C#6 1 4;70 F#5 1 4;79 B4 2 4;82 A5 1 4;81 B4 1 4;83 A5 1 4;84 G5 1 4;85 G5 1 4;90 F#5 1 4;87 F#5 1 4;88 A4 1 4;89 A4 1 4;91 A4 1 4;93 C#6 1 4;94 B5 1 4;92 C#6 1 4;86 F#5 1 4;95 B4 2 4;129 B4 1 4;130 B4 1 4;131 B4 1 4;132 B4 1 4;135 B4 1 4;136 B4 1 4;137 B4 1 4;138 B4 1 4;145 D5 1 4;146 D5 1 4;147 D5 1 4;148 D5 1 4;151 E5 1 4;152 E5 1 4;153 E5 1 4;154 E5 1 4;161 B4 1 4;162 B4 1 4;163 B4 1 4;164 B4 1 4;167 B4 1 4;168 B4 1 4;169 B4 1 4;170 B4 1 4;177 D5 1 4;178 D5 1 4;179 D5 1 4;180 D5 1 4;183 E5 1 4;184 E5 1 4;185 E5 1 4;186 E5 1 4;193 B4 1 4;194 B4 1 4;195 B4 1 4;196 B4 1 4;199 B4 1 4;200 B4 1 4;201 B4 1 4;202 B4 1 4;209 D5 1 4;210 D5 1 4;211 D5 1 4;212 D5 1 4;215 E5 1 4;216 E5 1 4;217 E5 1 4;218 E5 1 4;194 D5 1 4;196 F#5 1 4;197 D5 1 4;198 B4 1 4;199 D5 1 4;200 A5 2 4;202 F#5 2 4;204 D5 1 4;205 E5 1 4;206 D5 1 4;207 D5 2 4;0 B4 1 4;209 B4 1 4;211 B4 1 4;212 F#5 1 4;213 D5 1 4;214 B4 1 4;215 D5 1 4;216 G5 2 4;218 F#5 2 4;220 A5 1 4;221 C#6 1 4;222 A5 1 4;223 F#5 2 4;225 B4 1 4;226 B4 1 4;227 B4 1 4;228 B4 1 4;231 B4 1 4;232 B4 1 4;233 B4 1 4;234 B4 1 4;241 D5 1 4;242 D5 1 4;243 D5 1 4;244 D5 1 4;226 D5 1 4;228 F#5 1 4;229 D5 1 4;230 B4 1 4;231 D5 1 4;232 A5 2 4;234 F#5 2 4;236 D5 1 4;237 E5 1 4;238 D5 1 4;239 D5 2 4;241 B4 1 4;243 B4 1 4;244 F#5 1 4;245 D5 1 4;246 B4 1 4;247 D5 1 4;248 G5 2 4;250 F#5 2 4;252 D5 2 4;254 E5 2 4;247 E5 1 4;248 E5 1 4;249 E5 1 4;250 E5 1 4;65 B2 1 4;66 B2 1 4;67 B2 1 4;68 B2 1 4;71 B2 1 4;72 B2 1 4;73 B2 1 4;74 B2 1 4;81 D3 1 4;82 D3 1 4;83 D3 1 4;84 D3 1 4;87 E3 1 4;88 E3 1 4;89 E3 1 4;90 E3 1 4;97 B2 1 4;98 B2 1 4;99 B2 1 4;100 B2 1 4;103 B2 1 4;104 B2 1 4;105 B2 1 4;106 B2 1 4;113 D3 1 4;114 D3 1 4;115 D3 1 4;116 D3 1 4;119 E3 1 4;120 E3 1 4;121 E3 1 4;122 E3 1 4;129 B2 1 4;130 B2 1 4;131 B2 1 4;132 B2 1 4;135 B2 1 4;136 B2 1 4;137 B2 1 4;138 B2 1 4;145 D3 1 4;146 D3 1 4;147 D3 1 4;148 D3 1 4;151 E3 1 4;152 E3 1 4;153 E3 1 4;154 E3 1 4;161 B2 1 4;162 B2 1 4;163 B2 1 4;164 B2 1 4;167 B2 1 4;168 B2 1 4;169 B2 1 4;170 B2 1 4;177 D3 1 4;178 D3 1 4;179 D3 1 4;180 D3 1 4;183 E3 1 4;184 E3 1 4;185 E3 1 4;186 E3 1 4;193 B2 1 4;194 B2 1 4;195 B2 1 4;196 B2 1 4;199 B2 1 4;200 B2 1 4;201 B2 1 4;202 B2 1 4;209 D3 1 4;210 D3 1 4;211 D3 1 4;212 D3 1 4;215 E3 1 4;216 E3 1 4;217 E3 1 4;218 E3 1 4;225 B2 1 4;226 B2 1 4;227 B2 1 4;228 B2 1 4;231 B2 1 4;232 B2 1 4;233 B2 1 4;241 D3 1 4;242 D3 1 4;243 D3 1 4;244 D3 1 4;247 E3 1 4;248 E3 1 4;249 E3 1 4;250 E3 1 4;256 E4 1 4;257 E4 1 4;260 E4 1 4;261 E4 1 4;264 D4 1 
4;265 D4 1 4;268 D4 1 4;269 D4 1 4;272 E4 1 4;273 E4 1 4;276 E4 1 4;277 E4 1 4;262 F#5 1 4;266 D5 1 4;270 G5 1 4;274 E5 1 4;278 F#5 1 4;140 F#5 1 4;141 F#5 1 4;143 B4 1 4;156 G5 1 4;158 F#5 1 4;171 B4 1 4;172 F#5 1 4;173 B4 1 4;174 D5 1 4;187 B4 1 4;188 E5 1 4;190 B4 1 4;258 E5 1 4;280 B4 1 4;282 B4 1 4;288 E4 1 4;289 E4 1 4;292 D4 1 4;293 D4 1 4;296 D4 1 4;297 D4 1 4;300 E4 1 4;301 E4 1 4;304 E4 1 4;305 E4 1 4;290 F#5 1 4;294 D5 1 4;298 G5 1 4;302 E5 1 4;306 F#5 1 4;286 E5 1 4;308 B4 1 4;310 B4 1 4;256 F#2 1 4;257 F#2 1 4;258 F#2 1 4;259 F#2 1 4;260 F#2 1 4;261 F#2 1 4;262 F#2 1 4;263 F#2 1 4;264 B2 1 4;265 B2 1 4;266 B2 1 4;267 B2 1 4;268 B2 1 4;269 B2 1 4;270 B2 1 4;271 B2 1 4;272 F#2 1 4;273 F#2 1 4;274 F#2 1 4;275 F#2 1 4;276 F#2 1 4;277 F#2 1 4;278 F#2 1 4;279 F#2 1 4;280 B2 2 4;282 B2 2 4;286 F#2 1 4;287 F#2 1 4;288 F#2 1 4;289 F#2 1 4;290 F#2 1 4;291 F#2 1 4;292 B2 1 4;293 B2 1 4;294 B2 1 4;295 B2 1 4;296 B2 1 4;297 B2 1 4;298 B2 1 4;299 B2 1 4;300 D3 1 4;301 D3 1 4;302 D3 1 4;303 D3 1 4;304 D3 1 4;305 D3 1 4;306 D3 1 4;307 D3 1 4;308 D3 1 4;309 D3 1 4;310 D3 1 4;311 D3 1 4;312 D3 1 4;313 D3 1 4;314 D3 1 4;315 D3 1 4;300 G5 1 4;302 F#5 1 4;304 D5 1 4;300 G5 1 4;306 A4 1 4;308 G5 1 4;310 F#5 1 4;312 C#5 1 4;313 D5 1 4;314 E5 2 4;310 F#5 1 4;316 F#5 1 4;316 E4 1 4;317 E4 1 4;320 E4 1 4;321 E4 1 4;324 D4 1 4;325 D4 1 4;328 D4 1 4;329 D4 1 4;332 E4 1 4;333 E4 1 4;336 E4 1 4;337 E4 1 4;322 F#5 1 4;326 D5 1 4;330 G5 1 4;334 E5 1 4;338 F#5 1 4;318 E5 1 4;340 B4 1 4;342 B4 1 4;348 E4 1 4;349 E4 1 4;352 D4 1 4;353 D4 1 4;356 D4 1 4;357 D4 1 4;360 E4 1 4;361 E4 1 4;364 E4 1 4;365 E4 1 4;350 F#5 1 4;354 D5 1 4;358 G5 1 4;362 E5 1 4;346 E5 1 4;368 B4 1 4;370 B4 1 4;316 F#2 1 4;317 F#2 1 4;318 F#2 1 4;319 F#2 1 4;320 F#2 1 4;321 F#2 1 4;322 F#2 1 4;323 F#2 1 4;324 B2 1 4;325 B2 1 4;326 B2 1 4;327 B2 1 4;328 B2 1 4;329 B2 1 4;330 B2 1 4;331 B2 1 4;332 F#2 1 4;333 F#2 1 4;334 F#2 1 4;335 F#2 1 4;336 F#2 1 4;337 F#2 1 4;338 F#2 1 4;339 F#2 1 4;340 B2 2 4;342 B2 2 4;346 F#2 1 4;347 F#2 1 4;348 F#2 1 4;349 F#2 1 4;350 F#2 1 4;351 F#2 1 4;352 B2 1 4;353 B2 1 4;354 B2 1 4;355 B2 1 4;356 B2 1 4;357 B2 1 4;358 B2 1 4;359 B2 1 4;360 D3 1 4;361 D3 1 4;362 D3 1 4;363 D3 1 4;364 D3 1 4;365 D3 1 4;366 D3 1 4;367 D3 1 4;368 D3 1 4;369 D3 1 4;370 D3 1 4;371 D3 1 4;372 D3 1 4;373 D3 1 4;374 D3 1 4;375 D3 1 4;360 G5 1 4;362 F#5 1 4;364 D5 1 4;360 G5 1 4;366 A4 1 4;368 G5 1 4;372 C#5 1 4;373 D5 1 4;374 E5 2 4;376 F#5 1 4;361 G5 1 4;363 F#5 1 4;369 G5 1 4;366 F#5 1 4;371 B4 1 4;372 G5 1 4;373 G5 1 4;374 G5 1 4;375 G5 1 4;376 G5 1 4;377 G5 1 4;378 F#5 6 4;448 G5 1 4;449 F#5 5 4;456 G5 1 4;457 F#5 1 4;459 E5 1 4;460 D5 1 4;466 A4 1 4;467 A4 1 4;468 G5 1 4;469 G5 1 4;470 G5 1 4;471 G5 1 4;472 G5 1 4;473 G5 1 4;474 F#5 1 4;480 A5 1 4;482 G5 1 4;484 F#5 1 4;486 E5 1 4;489 F#5 1 4;490 D5 1 4;376 B3 1 4;377 B3 1 4;378 B3 1 4;379 B3 1 4;380 B3 1 4;381 B3 1 4;382 B3 1 4;383 B3 1 4;384 A3 1 4;385 A3 1 4;386 A3 1 4;387 A3 1 4;388 A3 1 4;389 A3 1 4;390 A3 1 4;391 A3 1 4;392 G3 1 4;393 G3 1 4;394 G3 1 4;395 G3 1 4;396 G3 1 4;397 G3 1 4;398 G3 1 4;399 G3 1 4;400 D3 1 4;401 D3 1 4;402 C#3 1 4;403 C#3 1 4;404 G3 1 4;405 G3 1 4;406 F#3 1 4;407 F#3 1 4;408 B3 1 4;409 B3 1 4;410 B3 1 4;411 B3 1 4;412 B3 1 4;413 B3 1 4;414 B3 1 4;415 B3 1 4;416 A3 1 4;417 A3 1 4;418 A3 1 4;419 A3 1 4;420 A3 1 4;421 A3 1 4;422 A3 1 4;423 A3 1 4;424 G3 1 4;425 G3 1 4;426 G3 1 4;427 G3 1 4;428 G3 1 4;429 G3 1 4;430 G3 1 4;431 G3 1 4;432 D3 1 4;433 D3 1 4;434 C#3 1 4;435 C#3 1 4;436 G3 1 4;437 G3 1 4;438 F#3 1 4;439 F#3 1 4;440 B3 1 4;441 B3 1 4;442 B3 1 
4;443 B3 1 4;444 B3 1 4;445 B3 1 4;446 B3 1 4;447 B3 1 4;448 A3 1 4;449 A3 1 4;450 A3 1 4;451 A3 1 4;452 A3 1 4;453 A3 1 4;454 A3 1 4;455 A3 1 4;456 G3 1 4;457 G3 1 4;458 G3 1 4;459 G3 1 4;460 G3 1 4;461 G3 1 4;462 G3 1 4;463 G3 1 4;464 D3 1 4;465 D3 1 4;466 C#3 1 4;467 C#3 1 4;468 G3 1 4;469 G3 1 4;470 F#3 1 4;471 F#3 1 4;472 B3 1 4;473 B3 1 4;474 B3 1 4;475 B3 1 4;476 B3 1 4;477 B3 1 4;478 B3 1 4;479 B3 1 4;480 A3 1 4;481 A3 1 4;482 A3 1 4;483 A3 1 4;484 A3 1 4;485 A3 1 4;486 A3 1 4;487 A3 1 4;488 G3 1 4;489 G3 1 4;490 G3 1 4;491 G3 1 4;492 G3 1 4;493 G3 1 4;494 G3 1 4;495 G3 1 4;496 D3 1 4;497 D3 1 4;498 C#3 1 4;499 C#3 1 4;500 G3 1 4;501 G3 1 4;502 F#3 1 4;503 F#3 1 4;448 C#4 1 4;449 C#4 1 4;450 C#4 1 4;451 C#4 1 4;452 C#4 1 4;453 C#4 1 4;454 C#4 1 4;455 C#4 1 4;456 D4 1 4;457 D4 1 4;458 D4 1 4;459 D4 1 4;460 D4 1 4;461 D4 1 4;462 D4 1 4;463 D4 1 4;480 C#4 1 4;481 C#4 1 4;482 C#4 1 4;483 C#4 1 4;484 C#4 1 4;485 C#4 1 4;486 C#4 1 4;487 C#4 1 4;488 D4 1 4;489 D4 1 4;490 D4 1 4;491 D4 1 4;492 D4 1 4;493 D4 1 4;494 D4 1 4;495 D4 1 4;234 B2 1 4;522 E3 1 4;378 E5 1 4;382 F#5 1 4;382 E5 1 4;386 F#5 1 4;386 E5 1 4;390 C5 1 4;390 D5 1 4'
# https://onlinesequencer.net/1536953
#song = '0 B5 1 0;4 A5 1 0;14 E5 1 0;10 G#5 1 0;18 C#5 1 0;26 F#5 1 0;28 G#5 1 0;30 A5 1 0;32 B5 1 0;36 A5 1 0;0 E5 1 0;0 F#5 1 0;0 C#5 1 0;4 E5 1 0;4 F#5 1 0;4 C#5 1 0;0 F#3 1 0;14 F#3 1 0;18 F#4 1 0;20 A4 1 0;22 C#5 1 0;24 F#4 1 0;26 A4 1 0;28 C#5 1 0;30 A4 1 0;32 F#5 1 0;36 F#5 1 0;32 D5 1 0;36 D5 1 0;42 G#5 1 0;50 C#6 1 0;46 E5 1 0;32 D3 1 0;46 D3 1 0;56 B5 1 0;60 C#6 1 0;56 B6 1 0;60 C#7 1 0;60 C#7 1 0;50 D4 1 0;52 F#4 1 0;54 C#5 1 0;56 D4 1 0;58 F#4 1 0;60 C#5 1 0;62 D4 1 0;64 B6 1 0;68 A6 1 0;64 B6 1 0;68 A6 1 0;64 C#6 1 0;68 C#6 1 0;64 E6 1 0;64 F#6 1 0;68 E6 1 0;68 F#6 1 0;64 F#3 1 0;64 F#2 1 0;68 F#4 1 0;68 A4 1 0;68 C#5 1 0;68 E5 1 0;74 G#6 1 0;78 E6 1 0;82 C#6 1 0;74 G#6 1 0;78 E6 1 0;82 C#6 1 0;72 F#4 1 0;74 A4 1 0;74 C#5 1 0;74 E5 1 0;78 F#3 1 0;78 F#2 1 0;86 F#4 1 0;86 A4 1 0;86 C#5 1 0;86 F#5 1 0;92 E4 1 0;92 A4 1 0;92 C#5 1 0;90 F#6 1 0;92 G#6 1 0;94 A6 1 0;96 B6 1 0;100 A6 1 0;90 F#6 1 0;92 G#6 1 0;94 A6 1 0;96 B6 1 0;100 A6 1 0;96 D3 1 0;96 D2 1 0;100 F#4 1 0;100 A4 1 0;100 C#5 1 0;100 F#5 1 0;96 B5 1 0;96 D6 1 0;96 F#6 1 0;100 D6 1 0;100 F#6 1 0;100 A5 1 0;106 G#6 1 0;110 E6 1 0;114 C#7 1 0;106 G#5 1 0;110 E5 1 0;114 C#6 1 0;106 G#6 1 0;104 C#5 1 0;106 A4 1 0;110 D3 1 0;110 D2 1 0;116 D4 1 0;116 F#4 1 0;116 A4 1 0;116 C#5 1 0;134 G#5 1 0;132 A5 1 0;140 B5 1 0;142 A5 1 0;138 G#5 1 0;128 F#4 1 0;128 G#4 1 0;128 A4 1 0;128 C#5 1 0;140 F#4 1 0;142 A4 1 0;148 A5 1 0;150 G#5 1 0;156 B5 1 0;154 A5 1 0;144 F#4 1 0;144 A4 1 0;144 C#5 1 0;144 E5 1 0;154 E5 1 0;156 C#5 1 0;158 A4 1 0;160 F#4 1 0;160 D4 1 0;160 A4 1 0;160 C#5 1 0;164 A5 1 0;166 G#5 1 0;170 G#5 1 0;172 B5 1 0;174 A5 1 0;176 D4 1 0;176 E4 1 0;176 F#4 1 0;176 A4 1 0;178 B5 1 0;182 C#6 1 0;188 C#4 1 0;192 B3 1 0;192 D4 1 0;192 F#4 1 0;192 A4 1 0;196 A5 1 0;198 G#5 1 0;202 G#5 1 0;204 B5 1 0;206 A5 1 0;202 B3 1 0;204 D4 1 0;206 F#4 1 0;208 E4 1 0;208 C#4 1 0;208 G#4 1 0;208 B4 1 0;210 B5 1 0;214 C#6 1 0;218 E5 1 0;222 F#5 1 0;218 B4 1 0;220 G#4 1 0;222 E4 1 0;224 D4 1 0;224 F#4 1 0;224 A4 1 0;224 C#5 1 0;228 D4 1 0;230 F#4 1 0;232 A4 1 0;234 C#5 1 0;236 D5 1 0;238 F#5 1 0;240 G#5 1 0;244 A5 1 0;246 G#5 1 0;240 E4 1 0;240 F#4 1 0;240 G#4 1 0;240 B4 1 0;246 E5 1 0;252 B4 1 0;256 F#4 1 0;256 G#4 1 0;256 A4 1 0;256 C#5 1 0;260 F#4 1 0;262 F#3 1 0;262 F#3 1 0;260 A5 1 0;262 G#5 1 0;262 G#5 1 0;268 B5 1 0;266 G#5 1 0;270 A5 1 0;266 A3 1 0;268 C#4 1 0;270 F#4 1 0;268 B5 1 0;272 A4 1 0;272 C#5 1 0;272 E5 1 0;276 A5 1 0;278 G#5 1 0;282 A5 1 0;284 B5 1 0;276 F#4 1 0;276 F#3 1 0;282 E4 1 0;282 E3 1 0;288 D4 1 0;288 D3 1 0;292 A5 1 0;294 G#5 1 0;298 G#5 1 0;300 B5 1 0;302 A5 1 0;292 D4 1 0;292 F#4 1 0;292 A4 1 0;292 C#5 1 0;298 D4 1 0;300 F#4 1 0;300 A4 1 0;300 C#5 1 0;302 D4 1 0;306 B5 1 0;310 C#6 1 0;314 E5 1 0;304 F#4 1 0;304 C#5 1 0;308 D4 1 0;308 E4 1 0;308 F#4 1 0;308 A4 1 0;312 D4 1 0;314 C#4 1 0;312 D3 1 0;314 C#3 1 0;320 B3 1 0;320 B2 1 0;320 F#5 1 0;326 F#5 1 0;332 B5 1 0;334 G#5 1 0;326 B3 1 0;326 D4 1 0;326 F#4 1 0;326 A4 1 0;332 C#4 1 0;332 C#3 1 0;338 C#4 1 0;338 E4 1 0;338 G#4 1 0;338 B4 1 0;344 D4 1 0;344 D3 1 0;348 B3 1 0;348 D4 1 0;348 F#4 1 0;348 A4 1 0;348 F#5 1 0;344 D5 1 0;344 A4 1 0;344 F4 1 0;352 C#4 1 0;352 C#3 1 0;352 G#5 1 0;358 G#5 1 0;362 G#5 1 0;364 A5 1 0;366 B5 1 0;358 C#4 1 0;358 E4 1 0;358 G#4 1 0;358 B4 1 0;364 D4 1 0;364 D3 1 0;370 C6 1 0;374 D6 1 0;376 E6 1 0;380 B5 1 0;384 C#6 1 0;370 D4 1 0;370 F4 1 0;370 A4 1 0;370 C5 1 0;376 E4 1 0;376 E3 1 0;384 C#7 1 0;384 F#2 1 0;384 F#3 1 0;388 A4 1 0;388 C#5 1 0;388 E5 1 0;390 F#4 1 0;392 G#4 1 0;392 A4 1 0;392 C#5 1 0;394 F#4 1 0;394 G#4 1 0;394 A4 1 
0;394 C#5 1 0;398 F#3 1 0;400 F#3 1 0;400 F#2 1 0;404 C#5 1 0;406 C#5 1 0;404 A4 1 0;404 G#4 1 0;405 F#4 1 0;406 A4 1 0;406 G#4 1 0;407 F#4 1 0;408 F#3 1 0;408 F#2 1 0;410 C#6 1 0;412 E6 1 0;414 C#6 1 0;410 C#5 1 0;412 E5 1 0;414 C#5 1 0;410 C#6 1 0;412 E6 1 0;414 C#6 1 0;416 G#6 1 0;418 F#6 1 0;416 G#5 1 0;418 F#5 1 0;416 G#6 1 0;418 F#6 1 0;420 F#6 1 0;422 E6 1 0;426 F#6 1 0;430 G#6 1 0;434 A6 1 0;440 A6 1 0;442 G#6 1 0;444 B5 1 0;446 B5 1 0;440 E6 1 0;440 E6 1 0;416 F#3 1 0;416 F#2 1 0;416 F#2 1 0;420 F#4 1 0;422 A4 1 0;422 C#5 1 0;422 E5 1 0;426 A4 1 0;426 C#5 1 0;426 E5 1 0;428 F#4 1 0;430 F#3 1 0;430 F#2 1 0;430 F#2 1 0;434 F#4 1 0;436 A4 1 0;436 C#5 1 0;436 E5 1 0;440 A4 1 0;440 C#5 1 0;440 E5 1 0;442 F#4 1 0;444 G#4 1 0;444 A4 1 0;444 C#5 1 0;450 C#6 1 0;452 C#6 1 0;452 F#6 1 0;454 E6 1 0;458 F#6 1 0;462 G#6 1 0;466 A6 1 0;470 B6 1 0;472 C#7 1 0;474 A6 1 0;476 B6 1 0;478 F#6 1 0;448 F#3 1 0;448 F#2 1 0;448 F#2 1 0;452 F#4 1 0;454 A4 1 0;454 C#5 1 0;454 E5 1 0;458 E5 1 0;458 C#5 1 0;458 A4 1 0;460 F#4 1 0;462 F#3 1 0;462 F#2 1 0;462 F#2 1 0;466 F#4 1 0;468 A4 1 0;468 C#5 1 0;468 E5 1 0;472 A4 1 0;472 C#5 1 0;472 E5 1 0;476 F#4 1 0;476 G#4 1 0;476 A4 1 0;476 C#5 1 0;474 C#6 1 0;476 E6 1 0;478 C#6 1 0;480 B6 1 0;482 A6 1 0;480 B5 1 0;482 A5 1 0;480 B6 1 0;482 A6 1 0;484 F#6 1 0;486 E6 1 0;490 F#6 1 0;494 G#6 1 0;498 A6 1 0;506 F#6 1 0;508 B6 1 0;510 A6 1 0;508 B5 1 0;510 A5 1 0;506 F#5 1 0;506 F#6 1 0;508 B6 1 0;510 A6 1 0;500 A4 1 0;500 C#5 1 0;504 A4 1 0;504 C#5 1 0;480 D3 1 0;480 D2 1 0;480 D2 1 0;494 D3 1 0;494 D2 1 0;494 D2 1 0;484 D4 1 0;486 F#4 1 0;486 A4 1 0;486 C#5 1 0;490 F#4 1 0;490 A4 1 0;490 C#5 1 0;492 D4 1 0;498 D4 1 0;500 F#4 1 0;504 F#4 1 0;506 D4 1 0;508 E4 1 0;508 F#4 1 0;508 A4 1 0;512 D3 1 0;512 D2 1 0;512 D2 1 0;516 D4 1 0;518 F#4 1 0;518 A4 1 0;518 C#5 1 0;522 F#4 1 0;522 A4 1 0;522 C#5 1 0;524 D4 1 0;526 D3 1 0;526 D2 1 0;526 D2 1 0;530 D4 1 0;532 F#4 1 0;532 A4 1 0;532 C#5 1 0;536 F#4 1 0;536 A4 1 0;536 C#5 1 0;540 D4 1 0;540 E4 1 0;540 F#4 1 0;540 A4 1 0;516 F#6 1 0;518 E6 1 0;522 F#6 1 0;526 G#6 1 0;530 A6 1 0;534 B6 1 0;536 C#7 1 0;538 A6 1 0;540 B6 1 0;542 F#6 1 0;538 C#6 1 0;540 E6 1 0;542 C#6 1 0;544 G#6 1 0;546 F#6 1 0;544 G#6 1 0;546 F#6 1 0;544 G#5 1 0;546 F#5 1 0;548 F#6 1 0;550 E6 1 0;554 F#6 1 0;558 G#6 1 0;562 A6 1 0;568 A6 1 0;570 G#6 1 0;572 B5 1 0;574 B5 1 0;568 E6 1 0;568 E6 1 0;544 F#3 1 0;544 F#2 1 0;544 F#2 1 0;548 F#4 1 0;550 A4 1 0;550 C#5 1 0;550 E5 1 0;554 A4 1 0;554 C#5 1 0;554 E5 1 0;556 F#4 1 0;558 F#3 1 0;558 F#2 1 0;558 F#2 1 0;562 F#4 1 0;564 A4 1 0;564 C#5 1 0;564 E5 1 0;568 A4 1 0;568 C#5 1 0;568 E5 1 0;570 F#4 1 0;572 G#4 1 0;572 A4 1 0;572 C#5 1 0;578 C#6 1 0;580 C#6 1 0;598 B6 1 0;600 C#7 1 0;602 A6 1 0;604 B6 1 0;606 F#6 1 0;580 F#6 1 0;582 E6 1 0;586 F#6 1 0;590 G#6 1 0;594 A6 1 0;576 F#3 1 0;576 F#2 1 0;576 F#2 1 0;580 F#4 1 0;582 A4 1 0;582 C#5 1 0;582 E5 1 0;586 E5 1 0;586 C#5 1 0;586 A4 1 0;588 F#4 1 0;590 F#3 1 0;590 F#2 1 0;590 F#2 1 0;580 C#6 1 0;602 C#6 1 0;604 E6 1 0;606 C#6 1 0;594 F#4 1 0;596 A4 1 0;596 C#5 1 0;596 E5 1 0;600 A4 1 0;600 C#5 1 0;600 E5 1 0;604 F#4 1 0;604 G#4 1 0;604 A4 1 0;604 C#5 1 0;608 B6 1 0;610 A6 1 0;608 B6 1 0;610 A6 1 0;608 B5 1 0;610 A5 1 0;608 D3 1 0;608 D2 1 0;608 D2 1 0;612 D4 1 0;614 F#4 1 0;614 A4 1 0;614 C#5 1 0;618 F#4 1 0;618 A4 1 0;618 C#5 1 0;620 D4 1 0;622 D3 1 0;622 D2 1 0;622 D2 1 0;612 F#6 1 0;614 E6 1 0;618 F#6 1 0;622 G#6 1 0;626 A6 1 0;634 F#5 1 0;636 B5 1 0;638 A5 1 0;634 F#6 1 0;636 B6 1 0;638 A6 1 0;634 F#6 1 0;636 B6 1 0;638 A6 1 0;626 D4 1 0;628 F#4 1 0;628 A4 1 
0;628 C#5 1 0;632 F#4 1 0;632 A4 1 0;632 C#5 1 0;634 D4 1 0;636 E4 1 0;636 F#4 1 0;636 A4 1 0;640 D3 1 0;640 D2 1 0;640 D2 1 0;644 D4 1 0;646 F#4 1 0;646 A4 1 0;646 C#5 1 0;650 F#4 1 0;650 A4 1 0;650 C#5 1 0;652 D4 1 0;654 D3 1 0;654 D2 1 0;654 D2 1 0;644 F#6 1 0;646 E6 1 0;650 F#6 1 0;654 G#6 1 0;658 A6 1 0;662 B6 1 0;664 C#7 1 0;666 A6 1 0;668 B6 1 0;670 F#6 1 0;658 D4 1 0;660 F#4 1 0;660 A4 1 0;660 C#5 1 0;664 D4 1 0;664 F#4 1 0;664 A4 1 0;664 C#5 1 0;668 E3 1 0;668 E2 1 0;668 E2 1 0;680 F#3 1 0;672 A6 1 0;676 A6 1 0;680 A6 1 0;672 A6 1 0;676 A6 1 0;680 A6 1 0;672 A5 1 0;676 A5 1 0;680 A5 1 0;672 C#6 1 0;672 E6 1 0;676 C#6 1 0;676 E6 1 0;680 C#6 1 0;680 E6 1 0;672 F#3 1 0;676 F#3 1 0;672 F#2 1 0;674 F#2 1 0;676 F#2 1 0;678 F#2 1 0;680 F#2 1 0;672 F#2 1 0;676 F#2 1 0;680 F#2 1 0;686 A5 1 0;686 C#6 1 0;686 E6 1 0;686 A6 1 0;686 A6 1 0;690 A5 1 0;690 C#6 1 0;690 E6 1 0;690 A6 1 0;694 A5 1 0;694 C#6 1 0;694 E6 1 0;694 A6 1 0;696 G#5 1 0;700 B5 1 0;696 G#6 1 0;700 B6 1 0;696 G#6 1 0;700 B6 1 0;682 A2 1 0;682 A3 1 0;684 C#4 1 0;684 C#3 1 0;684 C#3 1 0;686 F#3 1 0;686 F#4 1 0;696 C#6 1 0;696 E6 1 0;700 C#6 1 0;700 E6 1 0;690 F#3 1 0;692 F#3 1 0;694 F#3 1 0;696 E3 1 0;698 E3 1 0;700 E3 1 0;702 E3 1 0;692 F#3 1 0;692 F#4 1 0;696 E4 1 0;696 E3 1 0;700 E3 1 0;700 E4 1 0;686 F#3 1 0;704 A5 1 0;704 A6 1 0;704 A6 1 0;712 A6 1 0;718 A6 1 0;718 A6 1 0;712 A6 1 0;708 A6 1 0;708 A6 1 0;708 A5 1 0;712 A5 1 0;718 A5 1 0;704 C#6 1 0;704 E6 1 0;708 C#6 1 0;708 E6 1 0;712 C#6 1 0;712 E6 1 0;718 C#6 1 0;718 E6 1 0;704 D4 1 0;708 D4 1 0;712 D4 1 0;704 D3 1 0;704 D3 1 0;706 D3 1 0;708 D3 1 0;708 D3 1 0;710 D3 1 0;712 D3 1 0;712 D3 1 0;714 A3 1 0;714 A2 1 0;716 F#3 1 0;716 F#2 1 0;716 F#2 1 0;718 D2 1 0;718 D2 1 0;718 D3 1 0;722 D2 1 0;724 D2 1 0;724 D2 1 0;726 D2 1 0;728 E2 1 0;728 E2 1 0;730 E2 1 0;732 E2 1 0;732 E2 1 0;734 E2 1 0;722 D3 1 0;726 D3 1 0;728 E3 1 0;732 E3 1 0;722 A6 1 0;726 A6 1 0;728 G#6 1 0;732 B6 1 0;722 A5 1 0;726 A5 1 0;728 G#5 1 0;732 B5 1 0;722 C#6 1 0;722 E6 1 0;726 C#6 1 0;726 E6 1 0;728 C#6 1 0;728 E6 1 0;732 C#6 1 0;732 E6 1 0;736 F#2 1 0;736 F#2 1 0;738 F#2 1 0;740 F#2 1 0;740 F#2 1 0;742 F#2 1 0;744 F#2 1 0;744 F#2 1 0;736 F#3 1 0;740 F#3 1 0;744 F#3 1 0;746 A2 1 0;748 C#3 1 0;746 A3 1 0;748 C#4 1 0;748 C#3 1 0;750 E2 1 0;750 E3 1 0;750 E2 1 0;736 A6 1 0;740 A6 1 0;744 A6 1 0;750 A6 1 0;750 A6 1 0;744 A6 1 0;740 A6 1 0;736 A6 1 0;736 A5 1 0;740 A5 1 0;744 A5 1 0;750 A5 1 0;736 C#6 1 0;736 E6 1 0;740 C#6 1 0;740 E6 1 0;744 C#6 1 0;744 E6 1 0;750 C#6 1 0;750 E6 1 0;754 A6 1 0;758 A6 1 0;760 G#6 1 0;764 B6 1 0;768 A6 1 0;774 F#6 1 0;780 C#7 1 0;754 A6 1 0;758 A6 1 0;760 G#6 1 0;764 B6 1 0;768 A6 1 0;774 F#6 1 0;780 C#7 1 0;754 A5 1 0;758 A5 1 0;760 G#5 1 0;764 B5 1 0;768 A5 1 0;774 F#5 1 0;780 C#6 1 0;754 C#6 1 0;754 E6 1 0;758 C#6 1 0;758 E6 1 0;760 C#6 1 0;760 E6 1 0;764 C#6 1 0;764 E6 1 0;754 E2 1 0;756 E2 1 0;758 E2 1 0;760 E2 1 0;762 E2 1 0;756 E3 1 0;760 E3 1 0;756 E2 1 0;760 E2 1 0;764 A3 1 0;766 C#4 1 0;764 A2 1 0;766 C#3 1 0;764 A2 1 0;768 D3 1 0;768 D4 1 0;768 D3 1 0;768 D4 1 0;774 D4 1 0;774 D3 1 0;774 D3 1 0;774 D4 1 0;780 D3 1 0;780 D2 1 0;780 D2 1 0;780 D3 1 0;768 C#6 1 0;768 E6 1 0;774 D6 1 0;774 B5 1 0;780 F#6 1 0;780 A6 1 0;784 C2 1 8'
# https://onlinesequencer.net/1087370 - Battle Music
#song = '0 F4 1 0;0 G#4 1 0;0 C4 1 0;0 F3 1 0;0 F2 1 0;12 A#4 1 0;18 G4 1 0;12 G4 1 0;18 D#4 1 0;24 A#4 1 0;30 C5 1 0;24 F4 1 0;24 C#4 1 0;12 F3 1 0;18 F3 1 0;12 F2 1 0;18 F2 1 0;24 F2 1 0;24 F3 1 0;36 F3 1 0;36 F2 1 0;36 D#5 1 0;36 D#4 1 0;36 G4 1 0;36 A#4 1 0;48 G#4 1 0;48 C5 1 0;48 F4 1 0;48 C4 1 0;48 F3 1 0;48 F2 1 0;60 F2 1 0;66 F2 1 0;60 F3 1 0;66 F3 1 0;60 G#4 1 0;60 F4 1 0;66 D#4 1 0;66 G4 1 0;72 A#4 1 0;72 F3 1 0;72 F2 1 0;72 C#4 1 0;72 F4 1 0;84 F2 1 0;84 F3 1 0;84 E4 1 0;84 E5 1 0;90 G5 1 0;90 G4 1 0;84 A#4 1 0;90 A#4 1 0;96 F5 1 0;96 F6 1 0;96 G#5 1 0;96 C6 1 0;96 F2 1 0;96 F3 1 0;98 C4 1 0;98 G#4 1 0;99 F4 1 0;100 G#4 1 0;102 G#4 1 0;101 F4 1 0;103 F4 1 0;100 C4 1 0;102 C4 1 0;104 G#4 1 0;106 G#4 1 0;105 F4 1 0;107 F4 1 0;104 C4 1 0;106 C4 1 0;108 A#4 1 0;110 A#4 1 0;112 A#4 1 0;114 A#4 1 0;116 A#4 1 0;118 A#4 1 0;109 F4 1 0;111 F4 1 0;113 F4 1 0;115 F4 1 0;117 F4 1 0;119 F4 1 0;108 C#4 1 0;110 C#4 1 0;112 C#4 1 0;114 C#4 1 0;116 C#4 1 0;118 C#4 1 0;114 E5 1 0;114 E6 1 0;114 E6 1 0;120 G6 1 0;120 G5 1 0;120 A#5 1 0;120 C#6 1 0;120 C#5 1 0;122 C#5 1 0;124 C#5 1 0;126 C#5 1 0;128 C#5 1 0;130 C#5 1 0;120 F4 1 0;122 F4 1 0;124 F4 1 0;126 F4 1 0;128 F4 1 0;130 F4 1 0;121 A#4 1 0;123 A#4 1 0;125 A#4 1 0;127 A#4 1 0;129 A#4 1 0;131 A#4 1 0;132 C5 1 0;134 C5 1 0;136 C5 1 0;138 C5 1 0;140 C5 1 0;142 C5 1 0;132 F4 1 0;134 F4 1 0;136 F4 1 0;138 F4 1 0;140 F4 1 0;142 F4 1 0;133 G#4 1 0;135 G#4 1 0;137 G#4 1 0;139 G#4 1 0;141 G#4 1 0;143 G#4 1 0;138 F5 1 0;138 F6 1 0;144 G#6 1 0;144 F3 1 0;145 C4 1 0;146 G#4 1 0;146 C4 1 0;147 F4 1 0;148 G#4 1 0;149 F4 1 0;148 C4 1 0;150 G#4 1 0;152 G#4 1 0;154 G#4 1 0;151 F4 1 0;153 F4 1 0;155 F4 1 0;150 C4 1 0;152 C4 1 0;154 C4 1 0;144 G#5 1 0;144 C6 1 0;144 F6 1 0;156 C#4 1 0;156 A#4 1 0;158 A#4 1 0;160 A#4 1 0;162 A#4 1 0;164 A#4 1 0;166 A#4 1 0;157 F4 1 0;159 F4 1 0;161 F4 1 0;163 F4 1 0;165 F4 1 0;167 F4 1 0;158 C#4 1 0;160 C#4 1 0;162 C#4 1 0;164 C#4 1 0;166 C#4 1 0;156 G6 1 0;156 G5 1 0;162 G#6 1 0;162 G#5 1 0;168 A#6 1 0;168 A#5 1 0;168 C#5 1 0;170 C#5 1 0;172 C#5 1 0;174 C#5 1 0;176 C#5 1 0;178 C#5 1 0;168 F4 1 0;170 F4 1 0;172 F4 1 0;174 F4 1 0;176 F4 1 0;178 F4 1 0;169 A#4 1 0;171 A#4 1 0;173 A#4 1 0;175 A#4 1 0;177 A#4 1 0;179 A#4 1 0;180 C5 1 0;182 C5 1 0;184 C5 1 0;186 C5 1 0;188 C5 1 0;190 C5 1 0;180 F4 1 0;182 F4 1 0;184 F4 1 0;186 F4 1 0;188 F4 1 0;190 F4 1 0;181 G#4 1 0;183 G#4 1 0;185 G#4 1 0;187 G#4 1 0;189 G#4 1 0;191 G#4 1 0;168 C#6 1 0;168 F6 1 0;180 G5 1 0;180 G6 1 0;178 G#6 1 0;178 G#5 1 0;192 F3 1 0;192 F2 1 0;192 F2 1 0;198 C4 1 0;200 C4 1 0;202 C4 1 0;204 C#4 1 0;206 C#4 1 0;208 C#4 1 0;210 C#4 1 0;212 C#4 1 0;214 C#4 1 0;198 F4 1 0;200 F4 1 0;202 F4 1 0;204 F4 1 0;206 F4 1 0;208 F4 1 0;210 F4 1 0;212 F4 1 0;214 F4 1 0;198 G#4 1 0;200 G#4 1 0;202 G#4 1 0;204 A#4 1 0;206 A#4 1 0;208 A#4 1 0;210 A#4 1 0;212 A#4 1 0;214 A#4 1 0;192 F6 1 0;192 F5 1 0;192 G#5 1 0;192 C6 1 0;216 F3 1 0;216 F2 1 0;216 F2 1 0;198 F5 1 0;204 E5 1 0;210 G5 1 0;198 C5 1 0;204 C#5 1 0;210 C#5 1 0;210 E5 1 0;222 G#4 1 0;224 G#4 1 0;226 G#4 1 0;228 A#4 1 0;230 A#4 1 0;232 A#4 1 0;234 A#4 1 0;236 A#4 1 0;238 A#4 1 0;222 F4 1 0;224 F4 1 0;226 F4 1 0;228 F4 1 0;230 F4 1 0;232 F4 1 0;234 F4 1 0;236 F4 1 0;238 F4 1 0;222 C4 1 0;224 C4 1 0;226 C4 1 0;228 C#4 1 0;230 C#4 1 0;232 C#4 1 0;234 C#4 1 0;236 C#4 1 0;238 C#4 1 0;222 F5 1 0;222 C5 1 0;228 C#5 1 0;228 E5 1 0;228 G5 1 0;234 A#5 1 0;234 G5 1 0;234 E5 1 0;234 C#5 1 0;240 F3 1 0;240 F2 1 0;240 F2 1 0;246 C4 1 0;248 C4 1 0;250 C4 1 0;252 C#4 1 0;254 C#4 1 0;256 C#4 1 0;258 C#4 1 0;260 C#4 1 0;262 C#4 1 0;246 F4 
1 0;248 F4 1 0;250 F4 1 0;252 F4 1 0;254 F4 1 0;256 F4 1 0;258 F4 1 0;260 F4 1 0;262 F4 1 0;246 G#4 1 0;248 G#4 1 0;250 G#4 1 0;252 A#4 1 0;254 A#4 1 0;256 A#4 1 0;258 A#4 1 0;260 A#4 1 0;262 A#4 1 0;246 F6 1 0;252 E6 1 0;258 G6 1 0;246 F5 1 0;252 E5 1 0;258 G5 1 0;246 G#5 1 0;246 C6 1 0;252 G5 1 0;252 A#5 1 0;258 A#5 1 0;258 C#6 1 0;264 F3 1 0;264 F2 1 0;264 F2 1 0;270 G#4 1 0;272 G#4 1 0;274 G#4 1 0;276 A#4 1 0;278 A#4 1 0;280 A#4 1 0;282 A#4 1 0;284 A#4 1 0;286 A#4 1 0;270 F4 1 0;272 F4 1 0;274 F4 1 0;276 F4 1 0;278 F4 1 0;280 F4 1 0;282 F4 1 0;284 F4 1 0;286 F4 1 0;270 C4 1 0;272 C4 1 0;274 C4 1 0;276 C#4 1 0;278 C#4 1 0;280 C#4 1 0;282 C#4 1 0;284 C#4 1 0;286 C#4 1 0;270 C7 1 0;270 C6 1 0;270 F6 1 0;270 G#6 1 0;276 A#6 1 0;282 G6 1 0;276 A#5 1 0;282 G5 1 0;276 C#6 1 0;276 F6 1 0;282 A#5 1 0;282 C#6 1 0;288 F5 1 0;288 F6 1 0;288 G#5 1 0;288 C6 1 0;288 F3 1 0;288 F2 1 0;290 G#4 1 0;291 F4 1 0;290 C4 1 0;292 C4 1 0;294 C4 1 0;296 C4 1 0;298 C4 1 0;293 F4 1 0;295 F4 1 0;297 F4 1 0;299 F4 1 0;292 G#4 1 0;294 G#4 1 0;296 G#4 1 0;298 G#4 1 0;300 G#4 1 0;302 G#4 1 0;304 G#4 1 0;306 G#4 1 0;308 G#4 1 0;310 G#4 1 0;301 F4 1 0;303 F4 1 0;305 F4 1 0;307 F4 1 0;309 F4 1 0;311 F4 1 0;300 C4 1 0;302 C4 1 0;304 C4 1 0;306 C4 1 0;308 C4 1 0;310 C4 1 0;312 C#4 1 0;312 A#4 1 0;314 A#4 1 0;316 A#4 1 0;318 A#4 1 0;320 A#4 1 0;322 A#4 1 0;324 A#4 1 0;326 A#4 1 0;328 A#4 1 0;330 A#4 1 0;332 A#4 1 0;334 A#4 1 0;313 F4 1 0;315 F4 1 0;317 F4 1 0;319 F4 1 0;321 F4 1 0;323 F4 1 0;325 F4 1 0;327 F4 1 0;329 F4 1 0;331 F4 1 0;333 F4 1 0;335 F4 1 0;314 C#4 1 0;316 C#4 1 0;318 C#4 1 0;320 C#4 1 0;322 C#4 1 0;324 C#4 1 0;326 C#4 1 0;328 C#4 1 0;330 C#4 1 0;332 C#4 1 0;334 C#4 1 0;312 G6 1 0;312 G5 1 0;312 A#5 1 0;312 C#6 1 0;324 A#6 1 0;324 A#5 1 0;324 C#6 1 0;324 F6 1 0;336 G#6 1 0;336 G#5 1 0;336 F3 1 0;337 C4 1 0;338 G#4 1 0;338 C4 1 0;339 F4 1 0;340 G#4 1 0;341 F4 1 0;340 C4 1 0;342 G#4 1 0;344 G#4 1 0;346 G#4 1 0;343 F4 1 0;345 F4 1 0;347 F4 1 0;342 C4 1 0;344 C4 1 0;346 C4 1 0;348 G#4 1 0;350 G#4 1 0;352 G#4 1 0;354 G#4 1 0;356 G#4 1 0;358 G#4 1 0;348 C4 1 0;350 C4 1 0;352 C4 1 0;354 C4 1 0;356 C4 1 0;358 C4 1 0;349 F4 1 0;351 F4 1 0;353 F4 1 0;355 F4 1 0;357 F4 1 0;359 F4 1 0;336 C6 1 0;336 F6 1 0;348 G6 1 0;354 G#6 1 0;348 G5 1 0;354 G#5 1 0;360 A#5 1 0;360 A#6 1 0;360 A#4 1 0;362 A#4 1 0;364 A#4 1 0;366 A#4 1 0;368 A#4 1 0;370 A#4 1 0;372 A#4 1 0;374 A#4 1 0;376 A#4 1 0;378 A#4 1 0;380 A#4 1 0;382 A#4 1 0;361 F4 1 0;363 F4 1 0;365 F4 1 0;367 F4 1 0;369 F4 1 0;371 F4 1 0;373 F4 1 0;375 F4 1 0;377 F4 1 0;379 F4 1 0;381 F4 1 0;383 F4 1 0;360 C#4 1 0;362 C#4 1 0;364 C#4 1 0;366 C#4 1 0;368 C#4 1 0;370 C#4 1 0;372 C#4 1 0;374 C#4 1 0;376 C#4 1 0;378 C#4 1 0;380 C#4 1 0;382 C#4 1 0;360 C#6 1 0;360 F6 1 0;372 G5 1 0;372 G6 1 0;378 E6 1 0;378 E5 1 0;384 F6 1 0;384 F5 1 0;384 G#5 1 0;384 C6 1 0;384 F2 1 0;384 F3 1 0;386 A#3 1 0;387 C4 1 0;388 A#3 1 0;390 F4 1 0;392 A#3 1 0;394 C4 1 0;396 F3 1 0;396 F2 1 0;398 A#3 1 0;399 C4 1 0;400 A#3 1 0;401 C4 1 0;402 F4 1 0;403 A#3 1 0;404 C4 1 0;405 F4 1 0;406 A#3 1 0;407 C4 1 0;408 G5 1 0;408 G6 1 0;408 A#5 1 0;408 C#6 1 0;408 F3 1 0;408 F2 1 0;410 A#3 1 0;411 C#4 1 0;412 A#3 1 0;413 C#4 1 0;414 G4 1 0;415 A#3 1 0;416 C#4 1 0;417 G4 1 0;418 A#3 1 0;419 C#4 1 0;420 G4 1 0;423 G4 1 0;426 G4 1 0;429 G4 1 0;421 A#3 1 0;422 C#4 1 0;424 A#3 1 0;425 C#4 1 0;427 A#3 1 0;428 C#4 1 0;430 A#3 1 0;431 C#4 1 0;420 A#5 1 0;420 A#6 1 0;426 C#6 1 0;426 C#7 1 0;420 C#6 1 0;420 F6 1 0;426 F6 1 0;426 A#6 1 0;384 F3 1 0;396 F3 1 0;408 F3 1 0;432 C7 1 0;432 C6 1 0;432 F6 1 0;432 G#6 1 0;432 F3 1 
0;432 F2 1 0;434 A#3 1 0;435 C4 1 0;436 A#3 1 0;438 F4 1 0;440 A#3 1 0;442 C4 1 0;432 F3 1 0;444 F3 1 0;444 F3 1 0;444 F2 1 0;446 A#3 1 0;447 C4 1 0;448 A#3 1 0;449 C4 1 0;450 F4 1 0;451 A#3 1 0;452 C4 1 0;453 F4 1 0;454 A#3 1 0;455 C4 1 0;444 G6 1 0;444 G5 1 0;450 A#5 1 0;450 A#6 1 0;456 C6 1 0;456 C7 1 0;456 A#6 1 0;456 F6 1 0;456 F3 1 0;456 F2 1 0;456 F3 1 0;458 A#3 1 0;459 C#4 1 0;460 A#3 1 0;461 C#4 1 0;462 G4 1 0;463 A#3 1 0;465 G4 1 0;466 A#3 1 0;464 C#4 1 0;467 C#4 1 0;468 G4 1 0;471 G4 1 0;474 G4 1 0;477 G4 1 0;469 A#3 1 0;470 C#4 1 0;472 A#3 1 0;473 C#4 1 0;475 A#3 1 0;476 C#4 1 0;478 A#3 1 0;479 C#4 1 0;468 C#7 1 0;468 C#6 1 0;468 F6 1 0;468 A#6 1 0;474 C7 1 0;474 A#6 1 0;474 F6 1 0;474 C6 1 0;480 F3 1 0;480 F2 1 0;480 F5 1 0;480 F4 1 0;480 C5 1 0;488 G5 1 0;490 G#5 1 0;492 F5 1 0;492 F4 1 0;492 C#5 1 0;492 C#3 1 0;492 C#4 1 0;488 G4 1 0;490 G#4 1 0;500 G5 1 0;502 G#5 1 0;504 F5 1 0;504 F4 1 0;500 G4 1 0;502 G#4 1 0;504 B4 1 0;504 B3 1 0;504 B2 1 0;512 G5 1 0;514 G#5 1 0;516 F5 1 0;522 E5 1 0;512 G4 1 0;514 G#4 1 0;516 F4 1 0;522 E4 1 0;516 C5 1 0;516 C4 1 0;516 C3 1 0;522 C5 1 0;528 A#3 1 0;528 A#2 1 0;528 F4 1 0;528 F5 1 0;528 A#4 1 0;536 G5 1 0;538 G#5 1 0;540 F5 1 0;536 G4 1 0;538 G#4 1 0;540 F4 1 0;540 G#4 1 0;540 G#3 1 0;540 G#2 1 0;548 G4 1 0;550 G#4 1 0;552 F4 1 0;548 G5 1 0;550 G#5 1 0;552 F5 1 0;552 B4 1 0;552 B3 1 0;552 B2 1 0;560 G5 1 0;562 G#5 1 0;564 F5 1 0;570 E5 1 0;560 G4 1 0;562 G#4 1 0;564 F4 1 0;570 E4 1 0;564 C5 1 0;564 C4 1 0;564 C3 1 0;570 C5 1 0;576 F2 1 0;576 F3 1 0;578 F3 1 0;580 F3 1 0;582 F3 1 0;584 F3 1 0;586 F3 1 0;588 F3 1 0;590 F3 1 0;592 F3 1 0;594 F3 1 0;596 F3 1 0;598 F3 1 0;578 C4 1 0;580 C4 1 0;582 C4 1 0;584 C4 1 0;586 C4 1 0;588 C4 1 0;590 C4 1 0;592 C4 1 0;594 C4 1 0;596 C4 1 0;598 C4 1 0;576 C5 1 0;582 C#5 1 0;588 C5 1 0;576 F4 1 0;582 F4 1 0;588 F4 1 0;594 B4 1 0;594 F4 1 0;600 C5 1 0;606 C#5 1 0;612 C5 1 0;618 B4 1 0;600 F4 1 0;606 F4 1 0;612 F4 1 0;618 F4 1 0;602 C4 1 0;604 C4 1 0;606 C4 1 0;608 C4 1 0;610 C4 1 0;612 C4 1 0;614 C4 1 0;616 C4 1 0;618 C4 1 0;620 C4 1 0;622 C4 1 0;600 F3 1 0;602 F3 1 0;604 F3 1 0;606 F3 1 0;608 F3 1 0;610 F3 1 0;612 F3 1 0;614 F3 1 0;616 F3 1 0;618 F3 1 0;620 F3 1 0;622 F3 1 0;600 F2 1 0;624 F3 1 0;624 F2 1 0;626 C5 1 0;627 C#5 1 0;628 C5 1 0;626 F4 1 0;632 C5 1 0;633 C#5 1 0;634 C5 1 0;632 F4 1 0;630 F3 1 0;630 F2 1 0;648 F3 1 0;654 F3 1 0;648 F2 1 0;654 F2 1 0;650 C5 1 0;651 C#5 1 0;652 C5 1 0;650 F4 1 0;656 C5 1 0;657 C#5 1 0;658 C5 1 0;656 F4 1 0;672 C2 1 17'
# https://onlinesequencer.net/49771 - Pokemon HGSS Dark Cave / Ice Path
#song = '200 C7 8 0;208 D7 2 0;210 C7 2 0;212 A#6 2 0;214 G6 4 0;218 A#6 2 0;220 D7 4 0;224 C#7 16 0;240 C7 16 0;264 C7 8 0;272 D7 2 0;274 C7 2 0;276 A#6 2 0;278 G6 4 0;282 A#6 2 0;284 D7 4 0;288 C#7 12 0;300 C7 2 0;302 C#7 2 0;304 D#7 16 0;320 F6 4 0;324 D#6 2 0;326 D6 2 0;328 D#6 4 0;332 D6 2 0;334 C6 2 0;336 D6 2 0;338 C6 2 0;340 A#5 2 0;342 C6 2 0;344 D6 4 0;348 C6 2 0;350 A#5 2 0;352 C#6 8 0;360 A#5 8 0;368 D#6 2 0;370 D6 2 0;372 C6 2 0;374 A#5 2 0;376 C6 8 0;384 F6 4 0;388 D#6 2 0;390 D6 2 0;392 D#6 4 0;396 D6 2 0;398 C6 2 0;400 D6 2 0;402 C6 2 0;404 A#5 2 0;406 C6 2 0;408 D6 4 0;412 C6 2 0;414 A#5 2 0;416 C#6 4 0;420 C6 2 0;422 C#6 2 0;424 C6 2 0;426 C#6 2 0;428 A#5 2 0;430 C#6 2 0;432 C6 16 0;456 C7 8 0;464 D7 2 0;466 C7 2 0;468 A#6 2 0;470 G6 4 0;474 A#6 2 0;476 D7 4 0;480 C#7 16 0;496 C7 16 0;520 C7 8 0;528 D7 2 0;530 C7 2 0;532 A#6 2 0;534 G6 4 0;538 A#6 2 0;540 D7 4 0;544 C#7 16 0;560 C7 16 0;600 A#5 2 0;602 C6 2 0;604 C#6 4 0;608 C6 16 0;632 C#6 2 0;634 C6 2 0;636 C#6 4 0;640 C6 16 0;656 D#6 2 0;664 A#5 2 0;666 C6 2 0;668 C#6 4 0;672 C6 16 0;688 A#5 2 0;696 C#6 2 0;698 C6 2 0;700 C#6 4 0;768 D#7 16 0;784 D7 16 0;800 F7 16 0;816 D#7 16 0;904 C7 8 0;912 D7 2 0;914 C7 2 0;916 A#6 2 0;918 G6 4 0;922 A#6 2 0;924 D7 4 0;928 C#7 16 0;944 C7 16 0;968 C7 8 0;976 D7 2 0;978 C7 2 0;980 A#6 2 0;982 G6 4 0;986 A#6 2 0;988 D7 4 0;992 C#7 12 0;1004 C7 2 0;1006 C#7 2 0;1008 D#7 16 0;1024 F6 4 0;1028 D#6 2 0;1030 D6 2 0;1032 D#6 4 0;1036 D6 2 0;1038 C6 2 0;1040 D6 2 0;1042 C6 2 0;1044 A#5 2 0;1046 C6 2 0;1048 D6 4 0;1052 C6 2 0;1054 A#5 2 0;1056 C#6 8 0;1064 A#5 8 0;1072 D#6 2 0;1074 D6 2 0;1076 C6 2 0;1078 A#5 2 0;1080 C6 8 0;1088 F6 4 0;1092 D#6 2 0;1094 D6 2 0;1096 D#6 4 0;1100 D6 2 0;1102 C6 2 0;1104 D6 2 0;1106 C6 2 0;1108 A#5 2 0;1110 C6 2 0;1112 D6 4 0;1116 C6 2 0;1118 A#5 2 0;1120 C#6 4 0;1124 C6 2 0;1126 C#6 2 0;1128 C6 2 0;1130 C#6 2 0;1132 A#5 2 0;1134 C#6 2 0;1136 C6 16 0;1160 C7 8 0;1168 D7 2 0;1170 C7 2 0;1172 A#6 2 0;1174 G6 4 0;1178 A#6 2 0;1180 D7 4 0;1184 C#7 16 0;1200 C7 16 0;1224 C7 8 0;1232 D7 2 0;1234 C7 2 0;1236 A#6 2 0;1238 G6 4 0;1242 A#6 2 0;1244 D7 4 0;1248 C#7 16 0;1264 C7 16 0;1304 A#5 2 0;1306 C6 2 0;1308 C#6 4 0;1312 C6 16 0;1336 C#6 2 0;1338 C6 2 0;1340 C#6 4 0;1344 C6 16 0;1360 D#6 2 0;1368 A#5 2 0;1370 C6 2 0;1372 C#6 4 0;1376 C6 16 0;1392 A#5 2 0;1400 C#6 2 0;1402 C6 2 0;1404 C#6 4 0;1472 D#7 16 0;1488 D7 16 0;1504 F7 16 0;1520 D#7 16 0;64 G5 12 0;64 D#5 12 0;80 A#5 12 0;80 D5 12 0;96 F5 12 0;96 C#5 12 0;112 G5 12 0;112 C5 12 0;128 D#6 2 0;130 G#5 2 0;132 C6 2 0;134 D#6 2 0;136 G#5 2 0;138 C6 2 0;140 D#6 2 0;142 C6 2 0;144 D6 2 0;146 G5 2 0;148 A#5 2 0;150 D6 2 0;152 G5 2 0;154 A#5 2 0;156 D6 2 0;158 A#5 2 0;160 C#6 2 0;162 F5 2 0;164 A#5 2 0;166 C#6 2 0;168 F5 2 0;170 A#5 2 0;172 C#6 2 0;174 A#5 2 0;176 C6 2 0;178 G5 2 0;180 A#5 2 0;182 C6 2 0;184 G5 2 0;186 A#5 2 0;188 C6 2 0;190 A#5 2 0;192 D#6 2 0;194 G#5 2 0;196 C6 2 0;198 D#6 2 0;200 G#5 2 0;202 C6 2 0;204 D#6 2 0;206 C6 2 0;208 D6 2 0;210 G5 2 0;212 A#5 2 0;214 D6 2 0;216 G5 2 0;218 A#5 2 0;220 D6 2 0;222 A#5 2 0;224 C#6 2 0;226 F5 2 0;228 A#5 2 0;230 C#6 2 0;232 F5 2 0;234 A#5 2 0;236 C#6 2 0;238 A#5 2 0;240 C6 2 0;242 G5 2 0;244 A#5 2 0;246 C6 2 0;248 G5 2 0;250 A#5 2 0;252 C6 2 0;254 A#5 2 0;256 D#6 2 0;258 G#5 2 0;260 C6 2 0;262 D#6 2 0;264 G#5 2 0;266 C6 2 0;268 D#6 2 0;270 C6 2 0;272 D6 2 0;274 G5 2 0;276 A#5 2 0;278 D6 2 0;280 G5 2 0;282 A#5 2 0;284 D6 2 0;286 A#5 2 0;288 C#6 2 0;290 F5 2 0;292 A#5 2 0;294 C#6 2 0;296 F5 2 0;298 A#5 2 0;300 C#6 2 0;302 A#5 2 0;304 C6 2 0;306 G5 2 0;308 A#5 2 
0;310 C6 2 0;312 G5 2 0;314 A#5 2 0;316 C6 2 0;318 A#5 2 0;320 G#5 8 0;328 D#5 8 0;336 G5 8 0;344 D5 8 0;352 F5 8 0;360 C#5 8 0;368 D#5 8 0;376 G5 8 0;384 G#5 8 0;392 D#5 8 0;400 G5 8 0;408 D5 8 0;416 F5 8 0;424 C#5 8 0;432 D#5 8 0;440 C5 8 0;448 D#6 2 0;450 G#5 2 0;452 C6 2 0;454 D#6 2 0;456 G#5 2 0;458 C6 2 0;460 D#6 2 0;462 C6 2 0;464 D6 2 0;466 G5 2 0;468 A#5 2 0;470 D6 2 0;472 G5 2 0;474 A#5 2 0;476 D6 2 0;478 A#5 2 0;480 C#6 2 0;482 F5 2 0;484 A#5 2 0;486 C#6 2 0;488 F5 2 0;490 A#5 2 0;492 C#6 2 0;494 A#5 2 0;496 C6 2 0;498 G5 2 0;500 A#5 2 0;502 C6 2 0;504 G5 2 0;506 A#5 2 0;508 C6 2 0;510 A#5 2 0;512 D#6 2 0;514 G#5 2 0;516 C6 2 0;518 D#6 2 0;520 G#5 2 0;522 C6 2 0;524 D#6 2 0;526 C6 2 0;528 D6 2 0;530 G5 2 0;532 A#5 2 0;534 D6 2 0;536 G5 2 0;538 A#5 2 0;540 D6 2 0;542 A#5 2 0;544 C#6 2 0;546 F5 2 0;548 A#5 2 0;550 C#6 2 0;552 F5 2 0;554 A#5 2 0;556 C#6 2 0;558 A#5 2 0;560 C6 2 0;562 G5 2 0;564 A#5 2 0;566 C6 2 0;568 G5 2 0;570 A#5 2 0;572 C6 2 0;574 A#5 2 0;576 C5 4 0;580 G5 2 0;582 D#5 4 0;586 G5 2 0;588 F5 4 0;592 D#5 2 0;596 A#4 12 0;608 C5 4 0;612 G5 2 0;614 D#5 4 0;618 G5 2 0;620 D#5 4 0;624 F5 2 0;628 A#4 12 0;640 D#5 16 0;656 G5 2 0;664 F5 2 0;666 G5 2 0;668 G#5 4 0;672 G5 16 0;696 G#5 2 0;698 G5 2 0;700 G#5 4 0;704 D#6 2 0;706 G#5 2 0;708 C6 2 0;710 D#6 2 0;712 G#5 2 0;714 C6 2 0;716 D#6 2 0;718 C6 2 0;720 D6 2 0;722 G5 2 0;724 A#5 2 0;726 D6 2 0;728 G5 2 0;730 A#5 2 0;732 D6 2 0;734 A#5 2 0;736 C#6 2 0;738 F5 2 0;740 A#5 2 0;742 C#6 2 0;744 F5 2 0;746 A#5 2 0;748 C#6 2 0;750 A#5 2 0;752 C6 2 0;754 G5 2 0;756 A#5 2 0;758 C6 2 0;760 G5 2 0;762 A#5 2 0;764 C6 2 0;766 A#5 2 0;768 D#6 2 0;770 G#5 2 0;772 C6 2 0;774 D#6 2 0;776 G#5 2 0;778 C6 2 0;780 D#6 2 0;782 C6 2 0;784 D6 2 0;786 G5 2 0;788 A#5 2 0;790 D6 2 0;792 G5 2 0;794 A#5 2 0;796 D6 2 0;798 A#5 2 0;800 C#6 2 0;802 F5 2 0;804 A#5 2 0;806 C#6 2 0;808 F5 2 0;810 A#5 2 0;812 C#6 2 0;814 A#5 2 0;816 C6 2 0;818 G5 2 0;820 A#5 2 0;822 C6 2 0;824 G5 2 0;826 A#5 2 0;828 C6 2 0;830 A#5 2 0;832 D#6 2 0;834 G#5 2 0;836 C6 2 0;838 D#6 2 0;840 G#5 2 0;842 C6 2 0;844 D#6 2 0;846 C6 2 0;848 D6 2 0;850 G5 2 0;852 A#5 2 0;854 D6 2 0;856 G5 2 0;858 A#5 2 0;860 D6 2 0;862 A#5 2 0;864 C#6 2 0;866 F5 2 0;868 A#5 2 0;870 C#6 2 0;872 F5 2 0;874 A#5 2 0;876 C#6 2 0;878 A#5 2 0;880 C6 2 0;882 G5 2 0;884 A#5 2 0;886 C6 2 0;888 G5 2 0;890 A#5 2 0;892 C6 2 0;894 A#5 2 0;896 D#6 2 0;898 G#5 2 0;900 C6 2 0;902 D#6 2 0;904 G#5 2 0;906 C6 2 0;908 D#6 2 0;910 C6 2 0;912 D6 2 0;914 G5 2 0;916 A#5 2 0;918 D6 2 0;920 G5 2 0;922 A#5 2 0;924 D6 2 0;926 A#5 2 0;928 C#6 2 0;930 F5 2 0;932 A#5 2 0;934 C#6 2 0;936 F5 2 0;938 A#5 2 0;940 C#6 2 0;942 A#5 2 0;944 C6 2 0;946 G5 2 0;948 A#5 2 0;950 C6 2 0;952 G5 2 0;954 A#5 2 0;956 C6 2 0;958 A#5 2 0;960 D#6 2 0;962 G#5 2 0;964 C6 2 0;966 D#6 2 0;968 G#5 2 0;970 C6 2 0;972 D#6 2 0;974 C6 2 0;976 D6 2 0;978 G5 2 0;980 A#5 2 0;982 D6 2 0;984 G5 2 0;986 A#5 2 0;988 D6 2 0;990 A#5 2 0;992 C#6 2 0;994 F5 2 0;996 A#5 2 0;998 C#6 2 0;1000 F5 2 0;1002 A#5 2 0;1004 C#6 2 0;1006 A#5 2 0;1008 C6 2 0;1010 G5 2 0;1012 A#5 2 0;1014 C6 2 0;1016 G5 2 0;1018 A#5 2 0;1020 C6 2 0;1022 A#5 2 0;1024 G#5 8 0;1032 D#5 8 0;1040 G5 8 0;1048 D5 8 0;1056 F5 8 0;1064 C#5 8 0;1072 D#5 8 0;1080 G5 8 0;1088 G#5 8 0;1096 D#5 8 0;1104 G5 8 0;1112 D5 8 0;1120 F5 8 0;1128 C#5 8 0;1136 D#5 8 0;1144 C5 8 0;1152 D#6 2 0;1154 G#5 2 0;1156 C6 2 0;1158 D#6 2 0;1160 G#5 2 0;1162 C6 2 0;1164 D#6 2 0;1166 C6 2 0;1168 D6 2 0;1170 G5 2 0;1172 A#5 2 0;1174 D6 2 0;1176 G5 2 0;1178 A#5 2 0;1180 D6 2 0;1182 A#5 2 0;1184 C#6 2 0;1186 F5 2 0;1188 A#5 2 0;1190 C#6 2 
0;1192 F5 2 0;1194 A#5 2 0;1196 C#6 2 0;1198 A#5 2 0;1200 C6 2 0;1202 G5 2 0;1204 A#5 2 0;1206 C6 2 0;1208 G5 2 0;1210 A#5 2 0;1212 C6 2 0;1214 A#5 2 0;1216 D#6 2 0;1218 G#5 2 0;1220 C6 2 0;1222 D#6 2 0;1224 G#5 2 0;1226 C6 2 0;1228 D#6 2 0;1230 C6 2 0;1232 D6 2 0;1234 G5 2 0;1236 A#5 2 0;1238 D6 2 0;1240 G5 2 0;1242 A#5 2 0;1244 D6 2 0;1246 A#5 2 0;1248 C#6 2 0;1250 F5 2 0;1252 A#5 2 0;1254 C#6 2 0;1256 F5 2 0;1258 A#5 2 0;1260 C#6 2 0;1262 A#5 2 0;1264 C6 2 0;1266 G5 2 0;1268 A#5 2 0;1270 C6 2 0;1272 G5 2 0;1274 A#5 2 0;1276 C6 2 0;1278 A#5 2 0;1280 C5 4 0;1284 G5 2 0;1286 D#5 4 0;1290 G5 2 0;1292 F5 4 0;1296 D#5 2 0;1300 A#4 12 0;1312 C5 4 0;1316 G5 2 0;1318 D#5 4 0;1322 G5 2 0;1324 D#5 4 0;1328 F5 2 0;1332 A#4 12 0;1344 D#5 16 0;1360 G5 2 0;1368 F5 2 0;1370 G5 2 0;1372 G#5 4 0;1376 G5 16 0;1400 G#5 2 0;1402 G5 2 0;1404 G#5 4 0;1408 D#6 2 0;1410 G#5 2 0;1412 C6 2 0;1414 D#6 2 0;1416 G#5 2 0;1418 C6 2 0;1420 D#6 2 0;1422 C6 2 0;1424 D6 2 0;1426 G5 2 0;1428 A#5 2 0;1430 D6 2 0;1432 G5 2 0;1434 A#5 2 0;1436 D6 2 0;1438 A#5 2 0;1440 C#6 2 0;1442 F5 2 0;1444 A#5 2 0;1446 C#6 2 0;1448 F5 2 0;1450 A#5 2 0;1452 C#6 2 0;1454 A#5 2 0;1456 C6 2 0;1458 G5 2 0;1460 A#5 2 0;1462 C6 2 0;1464 G5 2 0;1466 A#5 2 0;1468 C6 2 0;1470 A#5 2 0;1472 D#6 2 0;1474 G#5 2 0;1476 C6 2 0;1478 D#6 2 0;1480 G#5 2 0;1482 C6 2 0;1484 D#6 2 0;1486 C6 2 0;1488 D6 2 0;1490 G5 2 0;1492 A#5 2 0;1494 D6 2 0;1496 G5 2 0;1498 A#5 2 0;1500 D6 2 0;1502 A#5 2 0;1504 C#6 2 0;1506 F5 2 0;1508 A#5 2 0;1510 C#6 2 0;1512 F5 2 0;1514 A#5 2 0;1516 C#6 2 0;1518 A#5 2 0;1520 C6 2 0;1522 G5 2 0;1524 A#5 2 0;1526 C6 2 0;1528 G5 2 0;1530 A#5 2 0;1532 C6 2 0;1534 A#5 2 0;0 C4 4 0;4 G4 2 0;6 D#4 4 0;10 G4 2 0;12 F4 4 0;16 D#4 2 0;20 A#3 6 0;26 C4 2 0;28 C#4 4 0;32 C4 4 0;36 G4 2 0;38 D#4 4 0;42 G4 2 0;44 D#4 4 0;48 F4 2 0;52 A#3 4 0;56 C#4 2 0;58 C4 2 0;60 C#4 4 0;64 C4 4 0;68 G4 2 0;70 D#4 4 0;74 G4 2 0;76 F4 4 0;80 D#4 2 0;84 A#3 6 0;90 C4 2 0;92 C#4 4 0;96 C4 4 0;100 G4 2 0;102 D#4 4 0;106 G4 2 0;108 D#4 4 0;112 F4 2 0;116 A#3 4 0;120 C#4 2 0;122 C4 2 0;124 C#4 4 0;128 C4 4 0;132 G4 2 0;134 D#4 4 0;138 G4 2 0;140 F4 4 0;144 D#4 2 0;148 A#3 6 0;154 C4 2 0;156 C#4 4 0;160 C4 4 0;164 G4 2 0;166 D#4 4 0;170 G4 2 0;172 D#4 4 0;176 F4 2 0;180 A#3 4 0;184 C#4 2 0;186 C4 2 0;188 C#4 4 0;192 C4 4 0;196 G4 2 0;198 D#4 4 0;202 G4 2 0;204 F4 4 0;208 D#4 2 0;212 A#3 6 0;218 C4 2 0;220 C#4 4 0;224 C4 4 0;228 G4 2 0;230 D#4 4 0;234 G4 2 0;236 D#4 4 0;240 F4 2 0;244 A#3 4 0;248 C#4 2 0;250 C4 2 0;252 C#4 4 0;256 C4 4 0;260 G4 2 0;262 D#4 4 0;266 G4 2 0;268 F4 4 0;272 D#4 2 0;276 A#3 6 0;282 C4 2 0;284 C#4 4 0;288 C4 4 0;292 G4 2 0;294 D#4 4 0;298 G4 2 0;300 D#4 4 0;304 F4 2 0;308 A#3 4 0;312 C#4 2 0;314 C4 2 0;316 C#4 4 0;320 C4 4 0;324 G4 2 0;326 D#4 4 0;330 G4 2 0;332 F4 4 0;336 D#4 2 0;340 A#3 6 0;346 C4 2 0;348 C#4 4 0;352 C4 4 0;356 G4 2 0;358 D#4 4 0;362 G4 2 0;364 D#4 4 0;368 F4 2 0;372 A#3 4 0;376 C#4 2 0;378 C4 2 0;380 C#4 4 0;384 C4 4 0;388 G4 2 0;390 D#4 4 0;394 G4 2 0;396 F4 4 0;400 D#4 2 0;404 A#3 6 0;410 C4 2 0;412 C#4 4 0;416 C4 4 0;420 G4 2 0;422 D#4 4 0;426 G4 2 0;428 D#4 4 0;432 F4 2 0;436 A#3 4 0;440 C#4 2 0;442 C4 2 0;444 C#4 4 0;448 C4 4 0;452 G4 2 0;454 D#4 4 0;458 G4 2 0;460 F4 4 0;464 D#4 2 0;468 A#3 6 0;474 C4 2 0;476 C#4 4 0;480 C4 4 0;484 G4 2 0;486 D#4 4 0;490 G4 2 0;492 D#4 4 0;496 F4 2 0;500 A#3 4 0;504 C#4 2 0;506 C4 2 0;508 C#4 4 0;512 C4 4 0;516 G4 2 0;518 D#4 4 0;522 G4 2 0;524 F4 4 0;528 D#4 2 0;532 A#3 6 0;538 C4 2 0;540 C#4 4 0;544 C4 4 0;548 G4 2 0;550 D#4 4 0;554 G4 2 0;556 D#4 4 0;560 F4 2 0;564 A#3 4 0;568 C#4 2 
0;570 C4 2 0;572 C#4 4 0;600 F4 2 0;602 G4 2 0;604 G#4 4 0;608 D#5 16 0;632 G#4 2 0;634 G4 2 0;636 G#4 4 0;640 C5 4 0;644 G5 2 0;646 D#5 4 0;650 G5 2 0;652 F5 4 0;656 D#5 2 0;660 A#4 12 0;672 C5 4 0;676 G5 2 0;678 D#5 4 0;682 G5 2 0;684 D#5 4 0;688 F5 2 0;692 A#4 4 0;696 C#5 2 0;698 C5 2 0;700 C#5 4 0;704 C5 4 0;708 G5 2 0;710 D#5 4 0;714 G5 2 0;716 F5 4 0;720 D#5 2 0;724 A#4 6 0;730 C5 2 0;732 C#5 4 0;736 C5 4 0;740 G5 2 0;742 D#5 4 0;746 G5 2 0;748 D#5 4 0;752 F5 2 0;756 A#4 4 0;760 C#5 2 0;762 C5 2 0;764 C#5 4 0;768 C5 4 0;772 G5 2 0;774 D#5 4 0;778 G5 2 0;780 F5 4 0;784 D#5 2 0;788 A#4 6 0;794 C5 2 0;796 C#5 4 0;800 C5 4 0;804 G5 2 0;806 D#5 4 0;810 G5 2 0;812 D#5 4 0;816 F5 2 0;820 A#4 4 0;824 C#5 2 0;826 C5 2 0;828 C#5 4 0;832 C4 4 0;836 G4 2 0;838 D#4 4 0;842 G4 2 0;844 F4 4 0;848 D#4 2 0;852 A#3 6 0;858 C4 2 0;860 C#4 4 0;864 C4 4 0;868 G4 2 0;870 D#4 4 0;874 G4 2 0;876 D#4 4 0;880 F4 2 0;884 A#3 4 0;888 C#4 2 0;890 C4 2 0;892 C#4 4 0;896 C4 4 0;900 G4 2 0;902 D#4 4 0;906 G4 2 0;908 F4 4 0;912 D#4 2 0;916 A#3 6 0;922 C4 2 0;924 C#4 4 0;928 C4 4 0;932 G4 2 0;934 D#4 4 0;938 G4 2 0;940 D#4 4 0;944 F4 2 0;948 A#3 4 0;952 C#4 2 0;954 C4 2 0;956 C#4 4 0;960 C4 4 0;964 G4 2 0;966 D#4 4 0;970 G4 2 0;972 F4 4 0;976 D#4 2 0;980 A#3 6 0;986 C4 2 0;988 C#4 4 0;992 C4 4 0;996 G4 2 0;998 D#4 4 0;1002 G4 2 0;1004 D#4 4 0;1008 F4 2 0;1012 A#3 4 0;1016 C#4 2 0;1018 C4 2 0;1020 C#4 4 0;1024 C4 4 0;1028 G4 2 0;1030 D#4 4 0;1034 G4 2 0;1036 F4 4 0;1040 D#4 2 0;1044 A#3 6 0;1050 C4 2 0;1052 C#4 4 0;1056 C4 4 0;1060 G4 2 0;1062 D#4 4 0;1066 G4 2 0;1068 D#4 4 0;1072 F4 2 0;1076 A#3 4 0;1080 C#4 2 0;1082 C4 2 0;1084 C#4 4 0;1088 C4 4 0;1092 G4 2 0;1094 D#4 4 0;1098 G4 2 0;1100 F4 4 0;1104 D#4 2 0;1108 A#3 6 0;1114 C4 2 0;1116 C#4 4 0;1120 C4 4 0;1124 G4 2 0;1126 D#4 4 0;1130 G4 2 0;1132 D#4 4 0;1136 F4 2 0;1140 A#3 4 0;1144 C#4 2 0;1146 C4 2 0;1148 C#4 4 0;1152 C4 4 0;1156 G4 2 0;1158 D#4 4 0;1162 G4 2 0;1164 F4 4 0;1168 D#4 2 0;1172 A#3 6 0;1178 C4 2 0;1180 C#4 4 0;1184 C4 4 0;1188 G4 2 0;1190 D#4 4 0;1194 G4 2 0;1196 D#4 4 0;1200 F4 2 0;1204 A#3 4 0;1208 C#4 2 0;1210 C4 2 0;1212 C#4 4 0;1216 C4 4 0;1220 G4 2 0;1222 D#4 4 0;1226 G4 2 0;1228 F4 4 0;1232 D#4 2 0;1236 A#3 6 0;1242 C4 2 0;1244 C#4 4 0;1248 C4 4 0;1252 G4 2 0;1254 D#4 4 0;1258 G4 2 0;1260 D#4 4 0;1264 F4 2 0;1268 A#3 4 0;1272 C#4 2 0;1274 C4 2 0;1276 C#4 4 0;1304 F4 2 0;1306 G4 2 0;1308 G#4 4 0;1312 D#5 16 0;1336 G#4 2 0;1338 G4 2 0;1340 G#4 4 0;1344 C5 4 0;1348 G5 2 0;1350 D#5 4 0;1354 G5 2 0;1356 F5 4 0;1360 D#5 2 0;1364 A#4 12 0;1376 C5 4 0;1380 G5 2 0;1382 D#5 4 0;1386 G5 2 0;1388 D#5 4 0;1392 F5 2 0;1396 A#4 4 0;1400 C#5 2 0;1402 C5 2 0;1404 C#5 4 0;1408 C5 4 0;1412 G5 2 0;1414 D#5 4 0;1418 G5 2 0;1420 F5 4 0;1424 D#5 2 0;1428 A#4 6 0;1434 C5 2 0;1436 C#5 4 0;1440 C5 4 0;1444 G5 2 0;1446 D#5 4 0;1450 G5 2 0;1452 D#5 4 0;1456 F5 2 0;1460 A#4 4 0;1464 C#5 2 0;1466 C5 2 0;1468 C#5 4 0;1472 C5 4 0;1476 G5 2 0;1478 D#5 4 0;1482 G5 2 0;1484 F5 4 0;1488 D#5 2 0;1492 A#4 6 0;1498 C5 2 0;1500 C#5 4 0;1504 C5 4 0;1508 G5 2 0;1510 D#5 4 0;1514 G5 2 0;1516 D#5 4 0;1520 F5 2 0;1524 A#4 4 0;1528 C#5 2 0;1530 C5 2 0;1532 C#5 4 0;0 C3 4 0;4 G3 2 0;6 D#3 4 0;10 G3 2 0;12 F3 4 0;16 D#3 2 0;20 A#2 6 0;26 C3 2 0;28 C#3 4 0;32 C3 4 0;36 G3 2 0;38 D#3 4 0;42 G3 2 0;44 D#3 4 0;48 F3 2 0;52 A#2 4 0;56 C#3 2 0;58 C3 2 0;60 C#3 4 0;64 C3 4 0;68 G3 2 0;70 D#3 4 0;74 G3 2 0;76 F3 4 0;80 D#3 2 0;84 A#2 6 0;90 C3 2 0;92 C#3 4 0;96 C3 4 0;100 G3 2 0;102 D#3 4 0;106 G3 2 0;108 D#3 4 0;112 F3 2 0;116 A#2 4 0;120 C#3 2 0;122 C3 2 0;124 C#3 4 0;128 C3 4 0;132 G3 2 0;134 
D#3 4 0;138 G3 2 0;140 F3 4 0;144 D#3 2 0;148 A#2 6 0;154 C3 2 0;156 C#3 4 0;160 C3 4 0;164 G3 2 0;166 D#3 4 0;170 G3 2 0;172 D#3 4 0;176 F3 2 0;180 A#2 4 0;184 C#3 2 0;186 C3 2 0;188 C#3 4 0;192 C3 4 0;196 G3 2 0;198 D#3 4 0;202 G3 2 0;204 F3 4 0;208 D#3 2 0;212 A#2 6 0;218 C3 2 0;220 C#3 4 0;224 C3 4 0;228 G3 2 0;230 D#3 4 0;234 G3 2 0;236 D#3 4 0;240 F3 2 0;244 A#2 4 0;248 C#3 2 0;250 C3 2 0;252 C#3 4 0;256 C3 4 0;260 G3 2 0;262 D#3 4 0;266 G3 2 0;268 F3 4 0;272 D#3 2 0;276 A#2 6 0;282 C3 2 0;284 C#3 4 0;288 C3 4 0;292 G3 2 0;294 D#3 4 0;298 G3 2 0;300 D#3 4 0;304 F3 2 0;308 A#2 4 0;312 C#3 2 0;314 C3 2 0;316 C#3 4 0;320 C3 4 0;324 G3 2 0;326 D#3 4 0;330 G3 2 0;332 F3 4 0;336 D#3 2 0;340 A#2 6 0;346 C3 2 0;348 C#3 4 0;352 C3 4 0;356 G3 2 0;358 D#3 4 0;362 G3 2 0;364 D#3 4 0;368 F3 2 0;372 A#2 4 0;376 C#3 2 0;378 C3 2 0;380 C#3 4 0;384 C3 4 0;388 G3 2 0;390 D#3 4 0;394 G3 2 0;396 F3 4 0;400 D#3 2 0;404 A#2 6 0;410 C3 2 0;412 C#3 4 0;416 C3 4 0;420 G3 2 0;422 D#3 4 0;426 G3 2 0;428 D#3 4 0;432 F3 2 0;436 A#2 4 0;440 C#3 2 0;442 C3 2 0;444 C#3 4 0;448 C3 4 0;452 G3 2 0;454 D#3 4 0;458 G3 2 0;460 F3 4 0;464 D#3 2 0;468 A#2 6 0;474 C3 2 0;476 C#3 4 0;480 C3 4 0;484 G3 2 0;486 D#3 4 0;490 G3 2 0;492 D#3 4 0;496 F3 2 0;500 A#2 4 0;504 C#3 2 0;506 C3 2 0;508 C#3 4 0;512 C3 4 0;516 G3 2 0;518 D#3 4 0;522 G3 2 0;524 F3 4 0;528 D#3 2 0;532 A#2 6 0;538 C3 2 0;540 C#3 4 0;544 C3 4 0;548 G3 2 0;550 D#3 4 0;554 G3 2 0;556 D#3 4 0;560 F3 2 0;564 A#2 4 0;568 C#3 2 0;570 C3 2 0;572 C#3 4 0;576 C4 4 0;580 G4 2 0;582 D#4 4 0;586 G4 2 0;588 F4 4 0;592 D#4 2 0;596 A#3 12 0;608 C4 4 0;612 G4 2 0;614 D#4 4 0;618 G4 2 0;620 D#4 4 0;624 F4 2 0;628 A#3 12 0;640 C4 4 0;644 G4 2 0;646 D#4 4 0;650 G4 2 0;652 F4 4 0;656 D#4 2 0;660 A#3 12 0;672 C4 4 0;676 G4 2 0;678 D#4 4 0;682 G4 2 0;684 D#4 4 0;688 F4 2 0;692 A#3 4 0;696 C#4 2 0;698 C4 2 0;700 C#4 4 0;704 C4 4 0;708 G4 2 0;710 D#4 4 0;714 G4 2 0;716 F4 4 0;720 D#4 2 0;724 A#3 6 0;730 C4 2 0;732 C#4 4 0;736 C4 4 0;740 G4 2 0;742 D#4 4 0;746 G4 2 0;748 D#4 4 0;752 F4 2 0;756 A#3 4 0;760 C#4 2 0;762 C4 2 0;764 C#4 4 0;768 C4 4 0;772 G4 2 0;774 D#4 4 0;778 G4 2 0;780 F4 4 0;784 D#4 2 0;788 A#3 6 0;794 C4 2 0;796 C#4 4 0;800 C4 4 0;804 G4 2 0;806 D#4 4 0;810 G4 2 0;812 D#4 4 0;816 F4 2 0;820 A#3 4 0;824 C#4 2 0;826 C4 2 0;828 C#4 4 0;832 C3 4 0;836 G3 2 0;838 D#3 4 0;842 G3 2 0;844 F3 4 0;848 D#3 2 0;852 A#2 6 0;858 C3 2 0;860 C#3 4 0;864 C3 4 0;868 G3 2 0;870 D#3 4 0;874 G3 2 0;876 D#3 4 0;880 F3 2 0;884 A#2 4 0;888 C#3 2 0;890 C3 2 0;892 C#3 4 0;896 C3 4 0;900 G3 2 0;902 D#3 4 0;906 G3 2 0;908 F3 4 0;912 D#3 2 0;916 A#2 6 0;922 C3 2 0;924 C#3 4 0;928 C3 4 0;932 G3 2 0;934 D#3 4 0;938 G3 2 0;940 D#3 4 0;944 F3 2 0;948 A#2 4 0;952 C#3 2 0;954 C3 2 0;956 C#3 4 0;960 C3 4 0;964 G3 2 0;966 D#3 4 0;970 G3 2 0;972 F3 4 0;976 D#3 2 0;980 A#2 6 0;986 C3 2 0;988 C#3 4 0;992 C3 4 0;996 G3 2 0;998 D#3 4 0;1002 G3 2 0;1004 D#3 4 0;1008 F3 2 0;1012 A#2 4 0;1016 C#3 2 0;1018 C3 2 0;1020 C#3 4 0;1024 C3 4 0;1028 G3 2 0;1030 D#3 4 0;1034 G3 2 0;1036 F3 4 0;1040 D#3 2 0;1044 A#2 6 0;1050 C3 2 0;1052 C#3 4 0;1056 C3 4 0;1060 G3 2 0;1062 D#3 4 0;1066 G3 2 0;1068 D#3 4 0;1072 F3 2 0;1076 A#2 4 0;1080 C#3 2 0;1082 C3 2 0;1084 C#3 4 0;1088 C3 4 0;1092 G3 2 0;1094 D#3 4 0;1098 G3 2 0;1100 F3 4 0;1104 D#3 2 0;1108 A#2 6 0;1114 C3 2 0;1116 C#3 4 0;1120 C3 4 0;1124 G3 2 0;1126 D#3 4 0;1130 G3 2 0;1132 D#3 4 0;1136 F3 2 0;1140 A#2 4 0;1144 C#3 2 0;1146 C3 2 0;1148 C#3 4 0;1152 C3 4 0;1156 G3 2 0;1158 D#3 4 0;1162 G3 2 0;1164 F3 4 0;1168 D#3 2 0;1172 A#2 6 0;1178 C3 2 0;1180 C#3 4 0;1184 C3 4 
0;1188 G3 2 0;1190 D#3 4 0;1194 G3 2 0;1196 D#3 4 0;1200 F3 2 0;1204 A#2 4 0;1208 C#3 2 0;1210 C3 2 0;1212 C#3 4 0;1216 C3 4 0;1220 G3 2 0;1222 D#3 4 0;1226 G3 2 0;1228 F3 4 0;1232 D#3 2 0;1236 A#2 6 0;1242 C3 2 0;1244 C#3 4 0;1248 C3 4 0;1252 G3 2 0;1254 D#3 4 0;1258 G3 2 0;1260 D#3 4 0;1264 F3 2 0;1268 A#2 4 0;1272 C#3 2 0;1274 C3 2 0;1276 C#3 4 0;1280 C4 4 0;1284 G4 2 0;1286 D#4 4 0;1290 G4 2 0;1292 F4 4 0;1296 D#4 2 0;1300 A#3 12 0;1312 C4 4 0;1316 G4 2 0;1318 D#4 4 0;1322 G4 2 0;1324 D#4 4 0;1328 F4 2 0;1332 A#3 12 0;1344 C4 4 0;1348 G4 2 0;1350 D#4 4 0;1354 G4 2 0;1356 F4 4 0;1360 D#4 2 0;1364 A#3 12 0;1376 C4 4 0;1380 G4 2 0;1382 D#4 4 0;1386 G4 2 0;1388 D#4 4 0;1392 F4 2 0;1396 A#3 4 0;1400 C#4 2 0;1402 C4 2 0;1404 C#4 4 0;1408 C4 4 0;1412 G4 2 0;1414 D#4 4 0;1418 G4 2 0;1420 F4 4 0;1424 D#4 2 0;1428 A#3 6 0;1434 C4 2 0;1436 C#4 4 0;1440 C4 4 0;1444 G4 2 0;1446 D#4 4 0;1450 G4 2 0;1452 D#4 4 0;1456 F4 2 0;1460 A#3 4 0;1464 C#4 2 0;1466 C4 2 0;1468 C#4 4 0;1472 C4 4 0;1476 G4 2 0;1478 D#4 4 0;1482 G4 2 0;1484 F4 4 0;1488 D#4 2 0;1492 A#3 6 0;1498 C4 2 0;1500 C#4 4 0;1504 C4 4 0;1508 G4 2 0;1510 D#4 4 0;1514 G4 2 0;1516 D#4 4 0;1520 F4 2 0;1524 A#3 4 0;1528 C#4 2 0;1530 C4 2 0;1532 C#4 4 0'
# C418 - Haggstrom
#song = '0 G5 9 33;0 C6 9 33;4 D5 1 33;8 C5 1 33;12 C6 9 33;12 G5 9 33;16 D5 1 33;20 C5 1 33;24 A5 1 33;28 C6 9 33;28 G5 9 33;32 D5 1 33;36 C5 1 33;40 C6 9 33;40 G5 9 33;44 D5 1 33;48 C5 1 33;56 C6 9 33;56 G5 9 33;60 D5 1 33;64 C5 1 33;68 C6 9 33;68 G5 9 33;72 D5 1 33;76 C5 1 33;80 A5 1 33;84 C6 9 33;84 G5 9 33;88 D5 1 33;92 C5 1 33;96 C6 9 33;96 G5 9 33;100 D5 1 33;104 C5 1 33;112 C6 4 33;112 G5 4 33;112 C5 1 33;112 C4 8 7;116 B5 4 33;116 D5 1 33;120 C5 1 33;120 G5 3 33;124 C6 4 33;124 G5 3 33;124 C5 1 33;124 C4 8 7;128 B5 4 33;128 D5 1 33;128 G5 4 33;132 C5 4 33;132 E5 4 33;136 A5 4 33;140 C6 4 33;140 G5 4 33;140 C5 1 33;140 C4 8 7;144 D5 1 33;144 B5 4 33;148 C5 1 33;148 G5 3 33;152 C6 4 33;152 G5 4 33;152 C5 1 33;152 C4 8 7;156 D5 4 33;156 B5 8 33;160 C5 6 33;168 C6 4 33;168 G5 3 33;168 C5 1 33;168 E5 1 33;168 C4 8 7;172 B5 4 33;172 G5 3 33;172 D5 4 33;176 C5 3 33;176 G5 3 33;180 C6 4 33;180 G5 3 33;180 E5 1 33;180 C5 1 33;180 C4 8 7;184 B5 4 33;184 G5 3 33;184 D5 4 33;188 C5 3 33;188 E5 3 33;192 A5 4 33;192 D6 4 33;196 C4 8 7;196 C5 7 33;196 G5 4 33;196 E6 4 33;200 D5 4 33;200 G6 4 33;204 C6 4 33;204 C5 3 33;208 E6 4 33;208 G5 4 33;212 G6 4 33;212 D5 4 33;216 C6 4 33;216 C5 4 33;208 C5 7 33;208 C4 7 7;224 A3 8 7;224 E5 4 33;224 B5 4 33;228 D5 4 33;228 A5 4 33;232 E5 3 33;232 A4 4 33;236 B5 4 33;236 E5 4 33;236 A3 8 7;240 D5 4 33;240 A5 4 33;244 G#5 4 33;244 A4 4 33;248 F#4 4 33;252 B5 4 33;252 E5 4 33;252 A3 8 7;256 A5 4 33;256 D5 4 33;260 E5 4 33;260 A4 4 33;264 A3 15 7;264 D5 4 33;268 C#5 4 33;272 A4 4 33;280 A3 8 7;280 E5 4 33;280 B5 4 33;284 D5 4 33;284 A5 4 33;292 B5 4 33;292 E5 4 33;292 A3 8 7;296 D5 4 33;296 A5 4 33;300 G#5 4 33;300 A4 4 33;288 A4 4 33;288 E5 3 33;288 C#6 4 33;300 E6 4 33;304 F#4 4 33;308 E6 3 33;308 C#6 4 33;308 G#6 4 33;308 A3 8 7;320 A3 8 7;320 C#6 4 33;320 E6 3 33;320 G#6 4 33;312 E6 3 33;312 A6 4 33;312 D5 4 33;316 A4 4 33;316 A5 4 33;316 E6 3 33;324 E6 3 33;324 A6 4 33;324 D5 4 33;328 A5 4 33;328 A4 4 33;120 G3 4 7;148 G3 4 7;176 G3 4 7;204 G3 4 7;132 G3 4 7;232 E3 4 7;260 E3 4 7;288 E3 4 7;316 E3 4 7'
# https://onlinesequencer.net/1140127 C418 - Sweden
#song = '64 E3 4 13;64 E4 4 13;64 G4 4 13;64 B4 4 13;68 F#3 4 13;76 B3 4 13;80 A3 4 13;84 G3 4 13;88 D3 4 13;72 D5 4 13;72 A4 4 13;72 F#5 4 13;72 G3 4 13;80 F#4 4 13;80 A4 4 13;80 C#5 4 13;88 A4 4 13;88 C#5 4 13;88 E5 4 13;96 E3 4 13;96 E4 4 13;96 G4 4 13;96 B4 4 13;100 F#3 4 13;108 B3 4 13;112 A3 4 13;116 G3 4 13;120 D3 4 13;104 D5 4 13;104 A4 4 13;104 F#5 4 13;104 G3 4 13;112 F#4 4 13;112 A4 4 13;112 C#5 4 13;120 A4 4 13;120 C#5 4 13;120 E5 4 13;0 E3 4 13;4 F#3 4 13;12 B3 4 13;16 A3 4 13;20 G3 4 13;24 D3 4 13;8 G3 4 13;32 E3 4 13;36 F#3 4 13;44 B3 4 13;48 A3 4 13;52 G3 4 13;56 D3 4 13;40 G3 4 13;0 E4 4 13;0 G4 4 13;8 A4 4 13;8 D5 4 13;16 A4 4 13;16 F#4 4 13;24 A4 4 13;24 C#5 4 13;32 E4 4 13;32 G4 4 13;40 A4 4 13;40 D5 4 13;48 A4 4 13;48 F#4 4 13;56 A4 4 13;56 C#5 4 13;128 E3 4 13;128 E4 4 13;128 G4 4 13;128 B4 4 13;132 F#3 4 13;140 B3 4 13;144 A3 4 13;148 G3 4 13;152 D3 4 13;136 D5 4 13;136 A4 4 13;136 F#5 4 13;136 G3 4 13;144 F#4 4 13;144 A4 4 13;144 C#5 4 13;152 A4 4 13;152 C#5 4 13;152 E5 4 13;132 A5 2 13;134 B5 2 13;142 D5 1 13;143 E5 1 13;150 F#5 1 13;151 A5 1 13;160 E3 4 13;160 E4 2 13;160 G4 2 13;160 B4 2 13;164 F#3 4 13;172 B3 4 13;176 A3 4 13;180 G3 4 13;184 D3 4 13;168 D5 4 13;168 A4 4 13;168 F#5 4 13;168 G3 4 13;176 F#4 4 13;176 A4 4 13;176 C#5 4 13;184 A4 4 13;184 C#5 4 13;184 E5 4 13;162 D6 2 13;164 B5 2 13;166 A5 2 13;174 D5 1 13;175 E5 1 13;182 A5 1 13;183 F#5 1 13'
# https://onlinesequencer.net/1194533 C418 - Wet Hands
#song = '0 A3 1 32;4 E4 1 32;8 A4 1 32;12 B4 1 32;16 C#5 1 32;20 B4 1 32;24 A4 1 32;28 E4 1 32;32 D4 1 32;36 F#4 1 32;40 C#5 1 32;44 E5 1 32;48 C#5 1 32;52 A4 1 32;64 A3 1 32;68 E4 1 32;72 A4 1 32;76 B4 1 32;80 C#5 1 32;84 B4 1 32;88 A4 1 32;92 E4 1 32;96 D4 1 32;100 F#4 1 32;104 C#5 1 32;108 E5 1 32;112 C#5 1 32;116 A4 1 32;128 A3 1 32;132 E4 1 32;136 A4 1 32;140 B4 1 32;144 C#5 1 32;148 B4 1 32;152 A4 1 32;156 E4 1 32;160 D4 1 32;164 F#4 1 32;168 C#5 1 32;172 E5 1 32;176 C#5 1 32;180 A4 1 32;128 G#5 1 32;152 A5 1 32;160 F#5 1 32;184 E5 1 32;192 G#5 1 32;188 F#5 1 32;192 A3 1 32;196 E4 1 32;200 A4 1 32;204 B4 1 32;208 C#5 1 32;212 B4 1 32;216 A4 1 32;220 E4 1 32;224 D4 1 32;228 F#4 1 32;232 C#5 1 32;236 E5 1 32;240 C#5 1 32;244 A5 1 32;216 B5 1 32;220 C#6 1 32;228 F#5 1 32;248 C#6 1 32;252 E6 1 32;256 G6 1 32;256 G3 1 32;260 B3 1 32;264 D4 1 32;268 F#4 1 32;272 A4 1 32;268 F#6 1 32;244 A4 1 32;272 D6 1 32;276 F#4 1 32;280 D4 1 32;284 B3 1 32;288 G3 1 32;292 B3 1 32;296 D4 1 32;300 F#4 1 32;304 A4 1 32;280 A5 1 32;284 B5 1 32;320 G6 1 32;320 G3 1 32;324 B3 1 32;328 D4 1 32;332 F#4 1 32;328 F#6 1 32;336 D6 1 32;344 A5 1 32;348 B5 1 32;340 F#4 1 32;344 D4 1 32;348 B3 1 32;352 G3 1 32;356 B3 1 32;360 D4 1 32;364 F#4 1 32;368 A4 1 32;336 A4 1 32;376 A5 1 32;384 E5 1 32;384 A3 1 32;388 E4 1 32;392 A4 1 32;396 B4 1 32;400 C#5 1 32;404 B4 1 32;408 A4 1 32;412 E4 1 32;416 A3 1 32;432 C#5 1 32;436 E5 1 32;440 A5 1 32;444 C#6 1 32;460 B3 1 32;464 D4 1 32;468 F#4 1 32;472 A4 1 32;476 C#5 1 32;459 F#5 1 32;459 B5 1 32;459 D6 1 32;472 C#6 1 32;476 A5 1 32;484 E5 1 32;484 E6 1 32;488 F#6 1 32;488 F#5 1 32;492 B3 1 32;496 D4 1 32;500 F#4 1 32;504 A4 1 32;508 C#5 1 32;496 D6 1 32;516 B5 1 32;520 C#6 1 32;524 D6 1 32;532 C#6 1 32;536 D6 1 32;544 F#6 1 32;555 C#6 8 32;524 G3 1 32;528 B3 1 32;532 D4 1 32;536 F#4 1 32;540 A4 1 32;555 A5 8 32;555 E5 8 32;555 A4 9 32;556 E4 8 32;556 C#4 8 32;556 A3 8 32;572 B5 1 32;576 A5 1 32;580 B5 1 32;580 E3 1 32;584 G#3 1 32;588 B3 1 32;592 E4 1 32;596 G#4 1 32;600 E4 1 32;604 B3 1 32;608 G#3 1 32;612 E3 1 32;616 G#3 1 32;620 B3 1 32;624 E4 1 32;628 G#4 1 32;632 E4 1 32;636 A3 1 32;644 G3 1 32;648 B3 1 32;652 D4 1 32;656 F#4 1 32;644 G6 1 32;648 F#6 1 32;652 E6 1 32;656 D6 1 32;660 E6 1 32;664 D6 1 32;668 E6 1 32;672 F#6 1 32;660 A4 1 32;664 F#4 1 32;668 D4 1 32;672 B3 1 32;676 A3 1 32;680 C#4 1 32;684 E4 1 32;688 A4 1 32;692 C#5 1 32;696 A4 1 32;700 E4 1 32;680 E6 1 32;692 A6 1 32;704 C#4 1 32;708 E3 1 32;712 G#3 1 32;716 B3 1 32;720 E4 1 32;724 G#4 1 32;708 G#6 1 32;712 E6 1 32;716 B5 1 32;720 G#5 1 32;724 E5 1 32;738 E3 1 32;742 G#3 1 32;746 B3 1 32;750 E4 1 32;754 G#4 1 32;738 B5 1 32;742 G#5 1 32;746 E5 1 32;750 B4 1 32;766 E3 1 32;770 G#3 1 32;774 B3 1 32;778 E4 1 32;782 B3 1 32;794 E3 1 32;798 G#3 1 32;802 B3 1 32;806 E4 1 32;810 E4 1 32;818 A5 1 32;826 E5 1 32;826 A3 1 32;830 E4 1 32;834 A4 1 32;838 B4 1 32;842 C#5 1 32;846 B4 1 32;850 A4 1 32;854 E4 1 32;858 D4 1 32;862 F#4 1 32;866 C#5 1 32;870 E5 1 32;874 C#5 1 32;878 A4 1 32;882 A5 1 32;890 A5 1 32;890 A3 1 32;894 E4 1 32;898 A4 1 32;902 B4 1 32;906 C#5 1 32;910 B4 1 32;914 A4 1 32;918 E4 1 32;922 D4 1 32;926 F#4 1 32;930 C#5 1 32;934 E5 1 32;938 C#5 1 32;942 A4 1 32;420 C#4 1 32;424 E4 1 32;428 A4 1 32;953 A3 1 32;953 G#5 1 32;957 E4 1 32;961 A4 1 32;965 B4 1 32;969 C#5 1 32;974 B4 1 32;979 A4 1 32;985 E4 1 32;995 A5 1 32;995 A3 1 32;995 C#4 1 32;995 E4 1 32;995 A4 1 32;995 C#5 1 32;995 E5 1 32;1015 B7 1 5'
# https://onlinesequencer.net/1865394 C418 - Calm 3
# song = '0 E4 1 8;0 G4 1 8;8 A4 1 8;8 D5 1 8;16 F#4 1 8;16 A4 1 8;24 A4 1 8;24 C#5 1 8;32 E4 1 8;32 G4 1 8;32 B4 1 8;40 A4 1 8;40 D5 1 8;40 F#5 1 8;48 F#4 1 8;48 A4 1 8;48 C#5 1 8;56 A4 1 8;56 C#5 1 8;56 E5 1 8;64 E4 1 8;64 G4 1 8;64 B4 1 8;72 A4 1 8;72 D5 1 8;72 F#5 1 8;80 F#4 1 8;80 A4 1 8;80 C#5 1 8;88 A4 1 8;88 C#5 1 8;88 E5 1 8;96 E4 1 8;96 G4 1 8;96 B4 1 8;100 A5 1 8;102 B5 1 8;104 A4 1 8;104 D5 1 8;104 F#5 1 8;110 D5 1 8;111 E5 1 8;112 F#4 1 8;112 A4 1 8;112 C#5 1 8;118 F#5 1 8;119 A5 1 8;120 A4 1 8;120 C#5 1 8;120 E5 1 8;128 E4 1 8;128 G4 1 8;128 B4 1 8;130 D6 1 8;132 B5 1 8;134 A5 1 8;136 A4 1 8;136 D5 1 8;136 F#5 1 8;142 D5 1 8;143 E5 1 8;144 F#4 1 8;144 A4 1 8;144 C#5 1 8;150 A5 1 8;151 F#5 1 8;152 A4 1 8;152 C#5 1 8;152 E5 1 8;160 E4 1 8;160 G4 1 8;160 B4 1 8;164 A5 1 8;166 B5 1 8;168 D5 1 8;168 F#5 1 8;168 D6 1 8;174 D5 1 8;174 F#6 1 8;175 E5 1 8;175 E6 1 8;176 D5 1 8;176 C#6 1 8;182 F#5 1 8;182 D6 1 8;183 C#6 1 8;184 A4 1 8;184 C#5 1 8;184 E5 1 8;184 A5 1 8;192 E4 1 8;192 G4 1 8;192 B4 1 8;196 B5 1 8;198 A5 1 8;200 A4 1 8;200 D5 1 8;200 F#5 1 8;206 D5 1 8;207 E5 1 8;208 F#4 1 8;208 A4 1 8;208 C#5 1 8;214 F#5 1 8;215 A5 1 8;216 A4 1 8;216 C#5 1 8;216 E5 1 8;224 E4 1 8;224 G4 1 8;224 B4 1 8;228 A5 1 8;230 B5 1 8;232 A4 1 8;232 D5 1 8;232 F#5 1 8;238 D5 1 8;239 E5 1 8;240 F#4 1 8;240 A4 1 8;240 C#5 1 8;244 F#6 1 8;246 F#5 1 8;247 A5 1 8;248 A4 1 8;248 C#5 1 8;248 E5 1 8;256 B4 1 8;256 D5 1 8;256 F#5 1 8;262 B5 1 8;263 A5 1 8;264 G#4 1 8;264 B4 1 8;264 E5 1 8;268 E5 1 8;270 D5 1 8;272 E4 1 8;272 A4 1 8;272 C#5 1 8;278 D5 1 8;279 E5 1 8;280 D4 1 8;280 G4 1 8;280 B4 1 8;288 B4 1 8;288 D5 1 8;288 F#5 1 8;288 D6 1 8;294 B5 1 8;295 A5 1 8;296 G#4 1 8;296 B4 1 8;296 E5 1 8;300 E5 1 8;302 D5 1 8;302 E6 1 8;304 E4 1 8;304 A4 1 8;304 C#5 1 8;304 C#6 1 8;308 D6 1 8;310 F#6 1 8;312 B4 1 8;312 B5 1 8;0 E3 1 8;4 F#3 1 8;8 G3 1 8;12 B3 1 8;16 A3 1 8;20 G3 1 8;24 D3 1 8;32 E3 1 8;36 F#3 1 8;40 G3 1 8;44 B3 1 8;48 A3 1 8;52 G3 1 8;56 D3 1 8;64 E3 1 8;68 F#3 1 8;72 G3 1 8;76 B3 1 8;80 A3 1 8;84 G3 1 8;88 D3 1 8;96 E3 1 8;100 F#3 1 8;104 G3 1 8;108 B3 1 8;112 A3 1 8;116 G3 1 8;120 D3 1 8;128 E3 1 8;132 F#3 1 8;136 G3 1 8;140 B3 1 8;144 A3 1 8;148 G3 1 8;152 D3 1 8;160 E3 1 8;164 F#3 1 8;168 G3 1 8;168 A4 1 8;172 B3 1 8;176 A3 1 8;176 F#4 1 8;176 A4 1 8;180 G3 1 8;184 D3 1 8;192 E3 1 8;196 F#3 1 8;200 G3 1 8;204 B3 1 8;208 A3 1 8;212 G3 1 8;216 D3 1 8;224 E3 1 8;228 F#3 1 8;232 G3 1 8;236 B3 1 8;240 A3 1 8;244 G3 1 8;248 D3 1 8;256 B2 1 8;256 B3 1 8;264 E3 1 8;264 E4 1 8;272 A2 1 8;272 A3 1 8;280 G2 1 8;280 G3 1 8;288 B2 1 8;288 B3 1 8;296 E3 1 8;296 E4 1 8;304 A2 1 8;304 A3 1 8;312 G2 1 8;312 D4 1 8;312 G4 1 8;312 B4 1 8;0 E4 1 26;0 G4 1 26;8 A4 1 26;8 D5 1 26;16 F#4 1 26;16 A4 1 26;24 A4 1 26;24 C#5 1 26;32 E4 1 26;32 G4 1 26;32 B4 1 26;40 A4 1 26;40 D5 1 26;40 F#5 1 26;48 F#4 1 26;48 A4 1 26;48 C#5 1 26;56 A4 1 26;56 C#5 1 26;56 E5 1 26;64 E4 1 26;64 G4 1 26;64 B4 1 26;72 A4 1 26;72 D5 1 26;72 F#5 1 26;80 F#4 1 26;80 A4 1 26;80 C#5 1 26;88 A4 1 26;88 C#5 1 26;88 E5 1 26;96 E4 1 26;96 G4 1 26;96 B4 1 26;100 A5 1 26;102 B5 1 26;104 A4 1 26;104 D5 1 26;104 F#5 1 26;110 D5 1 26;111 E5 1 26;112 F#4 1 26;112 A4 1 26;112 C#5 1 26;118 F#5 1 26;119 A5 1 26;120 A4 1 26;120 C#5 1 26;120 E5 1 26;128 E4 1 26;128 G4 1 26;128 B4 1 26;130 D6 1 26;132 B5 1 26;134 A5 1 26;136 A4 1 26;136 D5 1 26;136 F#5 1 26;142 D5 1 26;143 E5 1 26;144 F#4 1 26;144 A4 1 26;144 C#5 1 26;150 A5 1 26;151 F#5 1 26;152 A4 1 26;152 C#5 1 26;152 E5 1 26;160 E4 1 26;160 G4 1 26;160 B4 1 26;164 A5 1 26;166 B5 1 26;168 D5 1 26;168 F#5 1 
26;168 D6 1 26;174 D5 1 26;174 F#6 1 26;175 E5 1 26;175 E6 1 26;176 D5 1 26;176 C#6 1 26;182 F#5 1 26;182 D6 1 26;183 C#6 1 26;184 A4 1 26;184 C#5 1 26;184 E5 1 26;184 A5 1 26;192 E4 1 26;192 G4 1 26;192 B4 1 26;196 B5 1 26;198 A5 1 26;200 A4 1 26;200 D5 1 26;200 F#5 1 26;206 D5 1 26;207 E5 1 26;208 F#4 1 26;208 A4 1 26;208 C#5 1 26;214 F#5 1 26;215 A5 1 26;216 A4 1 26;216 C#5 1 26;216 E5 1 26;224 E4 1 26;224 G4 1 26;224 B4 1 26;228 A5 1 26;230 B5 1 26;232 A4 1 26;232 D5 1 26;232 F#5 1 26;238 D5 1 26;239 E5 1 26;240 F#4 1 26;240 A4 1 26;240 C#5 1 26;244 F#6 1 26;246 F#5 1 26;247 A5 1 26;248 A4 1 26;248 C#5 1 26;248 E5 1 26;256 B4 1 26;256 D5 1 26;256 F#5 1 26;262 B5 1 26;263 A5 1 26;264 G#4 1 26;264 B4 1 26;264 E5 1 26;268 E5 1 26;270 D5 1 26;272 E4 1 26;272 A4 1 26;272 C#5 1 26;278 D5 1 26;279 E5 1 26;280 D4 1 26;280 G4 1 26;280 B4 1 26;288 B4 1 26;288 D5 1 26;288 F#5 1 26;288 D6 1 26;294 B5 1 26;295 A5 1 26;296 G#4 1 26;296 B4 1 26;296 E5 1 26;300 E5 1 26;302 D5 1 26;302 E6 1 26;304 E4 1 26;304 A4 1 26;304 C#5 1 26;304 C#6 1 26;308 D6 1 26;310 F#6 1 26;312 B4 1 26;312 B5 1 26;0 E3 1 26;4 F#3 1 26;8 G3 1 26;12 B3 1 26;16 A3 1 26;20 G3 1 26;24 D3 1 26;32 E3 1 26;36 F#3 1 26;40 G3 1 26;44 B3 1 26;48 A3 1 26;52 G3 1 26;56 D3 1 26;64 E3 1 26;68 F#3 1 26;72 G3 1 26;76 B3 1 26;80 A3 1 26;84 G3 1 26;88 D3 1 26;96 E3 1 26;100 F#3 1 26;104 G3 1 26;108 B3 1 26;112 A3 1 26;116 G3 1 26;120 D3 1 26;128 E3 1 26;132 F#3 1 26;136 G3 1 26;140 B3 1 26;144 A3 1 26;148 G3 1 26;152 D3 1 26;160 E3 1 26;164 F#3 1 26;168 G3 1 26;168 A4 1 26;172 B3 1 26;176 A3 1 26;176 F#4 1 26;176 A4 1 26;180 G3 1 26;184 D3 1 26;192 E3 1 26;196 F#3 1 26;200 G3 1 26;204 B3 1 26;208 A3 1 26;212 G3 1 26;216 D3 1 26;224 E3 1 26;228 F#3 1 26;232 G3 1 26;236 B3 1 26;240 A3 1 26;244 G3 1 26;248 D3 1 26;256 B2 1 26;256 B3 1 26;264 E3 1 26;264 E4 1 26;272 A2 1 26;272 A3 1 26;280 G2 1 26;280 G3 1 26;288 B2 1 26;288 B3 1 26;296 E3 1 26;296 E4 1 26;304 A2 1 26;304 A3 1 26;312 G2 1 26;312 D4 1 26;312 G4 1 26;312 B4 1 26'
# https://onlinesequencer.net/59849 Never gonna give you up (by 3CHO)
song = '0 A#4 1 0;0 D5 1 0;0 F5 1 0;6 C5 1 0;6 E5 1 0;6 G5 1 0;12 C5 1 0;16 C5 1 0;16 E5 1 0;16 G5 1 0;22 A5 1 0;22 F5 1 0;22 D5 1 0;28 C6 0.5 0;29 A#5 0.5 0;30 A5 1 0;32 F5 1 0;32 D5 1 0;32 A#4 1 0;38 C5 1 0;38 E5 1 0;38 G5 1 0;58 C5 1 0;59 C5 1 0;60 D5 1 0;61 F5 1 0;63 F5 1 0;64 F5 1 0;64 D5 1 0;64 A#4 1 0;44 C5 1 0;70 G5 1 0;70 E5 1 0;70 C5 1 0;80 C5 1 0;80 E5 1 0;80 G5 1 0;76 C5 1 0;86 A5 1 0;86 F5 1 0;86 D5 1 0;92 C6 1 0;93 A#5 1 0;94 A5 1 0;96 A#4 1 0;96 D5 1 0;96 F5 1 0;102 G5 1 0;102 E5 1 0;102 C5 1 0;108 C5 1 0;112 C5 1 0;112 E5 1 0;114 F5 1 0;116 A4 1 0;116 D5 1 0;116 F5 1 0'
# https://onlinesequencer.net/1696155 Undertale - Heartache
# song = '0 A#3 1 0;0 F#3 2 0;1 C#4 1 0;2 C4 1 0;3 C#4 1 0;4 A#3 1 0;6 A#3 1 0;6 F#3 2 0;7 C4 1 0;8 C#4 1 0;9 G#4 1 0;10 F4 1 0;12 A#3 1 0;12 F#3 2 0;13 C#4 1 0;14 C4 1 0;15 C#4 1 0;16 A#3 1 0;18 A#3 1 0;18 F#3 2 0;19 C4 1 0;20 C#4 1 0;21 G#4 1 0;22 F4 1 0;24 A#3 1 0;24 F3 2 0;25 C#4 1 0;26 C4 1 0;27 C#4 1 0;28 A#3 1 0;30 A#3 1 0;31 C4 1 0;30 F3 2 0;32 C#4 1 0;33 G#4 1 0;34 F4 1 0;36 F3 2 0;36 D#5 2 0;38 C6 2 0;38 G#3 2 0;40 C4 2 0;40 A#5 2 0;42 G#5 1 0;42 D#4 2 0;43 F#5 1 0;44 C4 1 0;45 G#3 1 0;44 F5 2 0;46 D#4 1 0;47 C4 1 0;46 G#5 2 0;48 A#3 1 0;48 F#3 2 0;49 C#4 1 0;50 C4 1 0;51 C#4 1 0;52 A#3 1 0;54 A#3 1 0;55 C4 1 0;54 F#3 2 0;56 C#4 1 0;57 G#4 1 0;58 F4 1 0;60 A#3 1 0;60 F#3 2 0;61 C#4 1 0;48 A#5 15 0;62 C4 1 0;63 C#4 1 0;64 A#3 1 0;66 A#3 1 0;66 F#3 2 0;67 C4 1 0;68 C#4 1 0;69 G#4 1 0;70 F4 1 0;72 A#3 1 0;72 F3 2 0;73 C#4 1 0;74 C4 1 0;75 C#4 1 0;76 A#3 1 0;78 A#3 1 0;78 F3 2 0;79 C4 1 0;80 C#4 1 0;81 G#4 1 0;82 F4 1 0;84 D#5 2 0;84 D#6 2 0;84 G#3 2 0;86 C7 2 0;86 D#3 2 0;86 C6 2 0;88 A#6 2 0;88 F3 2 0;88 A#5 2 0;90 G#5 1 0;90 G#6 1 0;90 G#3 2 0;91 F#6 1 0;91 F#5 1 0;92 F5 2 0;92 C4 2 0;92 F6 2 0;94 D#4 2 0;94 G#6 2 0;94 G#5 2 0;96 F#3 2 0;98 C4 1 0;99 C#4 1 0;100 A#3 1 0;101 F3 1 0;96 A#5 8 0;102 F#3 2 0;104 C4 1 0;104 F5 2 0;105 C#4 1 0;106 A#3 1 0;106 G#5 2 0;107 D#4 1 0;108 G#3 2 0;110 D#3 1 0;108 A#5 4 0;111 F3 1 0;112 G#3 1 0;112 C#6 2 0;113 F3 1 0;114 D#6 1 0;114 D#3 2 0;115 C#6 1 0;116 F3 2 0;116 C6 2 0;118 G#3 2 0;118 C#6 2 0;120 A#3 1 0;121 A#4 1 0;122 D#4 1 0;123 F4 1 0;124 C#4 1 0;125 G#3 1 0;126 A#3 1 0;127 A#4 1 0;128 D#4 1 0;129 F4 1 0;130 C#4 1 0;131 G#3 1 0;132 A#3 1 0;133 A#4 1 0;120 A#5 15 0;134 D#4 1 0;135 F4 1 0;136 C#4 1 0;137 F3 1 0;138 G#3 2 0;140 D#3 2 0;142 G#3 2 0;144 A#4 1 0;144 A#3 1 0;144 F#3 1 0;145 C#5 1 0;145 C#4 1 0;146 C5 1 0;146 C4 1 0;147 C#5 1 0;147 C#4 1 0;148 A#4 1 0;148 A#3 1 0;150 A#4 1 0;150 F#3 1 0;150 A#3 1 0;151 C4 1 0;151 C5 1 0;152 C#4 1 0;152 C#5 1 0;153 G#4 1 0;154 F4 1 0;154 F5 1 0;156 A#4 1 0;156 A#3 1 0;156 F#3 1 0;157 C#5 1 0;157 C#4 1 0;158 C5 1 0;158 C4 1 0;159 C#5 1 0;159 C#4 1 0;160 A#3 1 0;160 A#4 1 0;162 A#4 1 0;162 A#3 1 0;162 F#3 1 0;163 C5 1 0;163 C4 1 0;164 C#5 1 0;164 C#4 1 0;165 G#4 1 0;166 F5 1 0;166 F4 1 0;168 A#3 1 0;168 F3 1 0;168 A#4 1 0;169 C#5 1 0;169 C#4 1 0;170 C5 1 0;170 C4 1 0;171 C#4 1 0;171 C#5 1 0;172 A#3 1 0;172 A#4 1 0;174 A#4 1 0;174 A#3 1 0;174 F3 1 0;175 C5 1 0;175 C4 1 0;176 C#5 1 0;176 C#4 1 0;177 G#4 1 0;178 F4 1 0;178 F5 1 0;180 F3 2 0;180 D#6 2 0;180 F4 2 0;182 G#4 2 0;182 C7 2 0;182 G#3 2 0;184 C4 2 0;184 A#6 2 0;184 C5 2 0;186 G#6 1 0;186 D#5 2 0;186 D#4 2 0;187 F#6 1 0;188 C5 1 0;188 C4 1 0;189 G#3 1 0;189 G#4 1 0;188 F6 2 0;190 D#4 1 0;190 D#5 1 0;191 C5 1 0;190 G#6 2 0;191 C4 1 0;192 A#4 1 0;192 A#3 1 0;192 F#3 1 0;193 C#5 1 0;193 C#4 1 0;194 C4 1 0;194 C5 1 0;195 C#4 1 0;195 C#5 1 0;196 A#4 1 0;196 A#3 1 0;198 F#3 1 0;198 A#3 1 0;198 A#4 1 0;199 C5 1 0;199 C4 1 0;200 C#4 1 0;200 C#5 1 0;201 G#4 1 0;202 F4 1 0;202 F5 1 0;204 A#4 1 0;204 F#3 1 0;204 A#3 1 0;205 C#4 1 0;205 C#5 1 0;206 C5 1 0;192 A#6 15 0;206 C4 1 0;207 C#4 1 0;207 C#5 1 0;208 A#3 1 0;208 A#4 1 0;210 A#3 1 0;210 F#3 1 0;210 A#4 1 0;211 C5 1 0;211 C4 1 0;212 C#5 1 0;212 C#4 1 0;213 G#4 1 0;214 F5 1 0;214 F4 1 0;216 D#5 1 0;216 D#4 1 0;217 F4 1 0;217 F5 1 0;218 G#5 1 0;218 G#4 1 0;219 A#4 1 0;219 A#5 1 0;220 C6 1 0;220 C5 1 0;221 C#6 1 0;221 C#5 1 0;222 D#6 1 0;222 D#5 1 0;223 C#6 1 0;223 C#5 1 0;224 C6 1 0;224 C5 1 0;225 A#5 1 0;225 A#4 1 0;226 G#5 1 0;226 G#4 1 0;227 F4 1 0;227 F5 1 0;228 G#5 2 0;228 G#4 2 0;228 D#6 2 0;228 
G#3 2 0;230 C7 2 0;230 D#3 2 0;230 D#4 2 0;230 D#6 2 0;232 F4 2 0;232 C#6 2 0;232 F3 2 0;232 A#6 2 0;234 G#6 1 0;234 C6 1 0;234 G#3 2 0;234 G#4 2 0;235 A#5 1 0;235 F#6 1 0;236 G#5 2 0;236 F6 2 0;236 C5 2 0;236 C4 2 0;238 G#6 2 0;238 F5 2 0;238 D#4 2 0;238 D#5 2 0;240 F#3 2 0;242 C4 1 0;243 C#4 1 0;244 A#3 1 0;245 F3 1 0;240 A#5 8 0;246 F#3 2 0;240 F#5 8 0;248 C4 1 0;248 F5 2 0;248 C5 2 0;249 C#4 1 0;250 A#3 1 0;250 G#5 2 0;250 D#5 2 0;251 D#4 1 0;252 G#3 2 0;254 D#3 1 0;252 F5 4 0;252 A#5 4 0;255 F3 1 0;256 G#3 1 0;256 F5 2 0;256 C#6 2 0;257 F3 1 0;258 F#5 1 0;258 D#6 1 0;258 D#3 2 0;259 C#6 1 0;259 F5 1 0;260 D#5 2 0;260 C6 2 0;260 F3 2 0;262 G#3 2 0;262 C#6 2 0;262 F5 2 0;264 A#3 1 0;265 A#4 1 0;266 D#4 1 0;267 F4 1 0;268 C#4 1 0;269 G#3 1 0;270 A#3 1 0;271 A#4 1 0;272 D#4 1 0;273 F4 1 0;274 C#4 1 0;275 G#3 1 0;276 A#3 1 0;277 A#4 1 0;278 D#4 1 0;264 A#5 15 0;264 F5 15 0;279 F4 1 0;280 C#4 1 0;281 F3 1 0;282 G#3 2 0;282 F5 2 0;282 A#5 2 0;284 C#6 2 0;284 G#5 2 0;284 D#3 2 0;286 D#6 2 0;286 A#5 2 0;286 F3 2 0;288 D#3 2 0;290 G#3 1 0;291 A#3 1 0;292 F#3 1 0;293 C#3 1 0;294 D#3 2 0;288 F6 8 0;288 G#5 8 0;296 G#3 1 0;296 G#5 2 0;296 C#6 2 0;297 A#3 1 0;298 D#6 2 0;298 A#5 2 0;298 C4 2 0;300 F3 2 0;302 C4 1 0;300 G#5 4 0;300 F6 4 0;303 G#4 1 0;304 F4 1 0;304 D#6 2 0;304 A#5 2 0;305 C4 1 0;306 G#5 1 0;306 C#6 1 0;306 F3 1 0;307 A#5 1 0;307 D#6 1 0;307 F4 1 0;308 G#3 1 0;309 F4 1 0;308 C6 2 0;308 D#5 2 0;310 A#3 1 0;311 G#4 1 0;310 G#5 2 0;310 F5 2 0;312 F#3 1 0;313 F#4 1 0;314 C#4 1 0;315 D#4 1 0;316 F4 1 0;317 F3 1 0;318 F#3 1 0;312 A#5 8 0;312 F5 8 0;319 F#4 1 0;320 C#4 1 0;320 C#5 2 0;321 D#4 1 0;322 F4 1 0;322 D#5 2 0;323 F#3 1 0;324 F3 1 0;325 F4 1 0;326 C4 1 0;324 F5 4 0;327 G#4 1 0;328 F4 1 0;328 D#5 2 0;329 C4 1 0;330 C#5 1 0;330 F3 1 0;331 F4 1 0;331 D#5 1 0;332 G#3 1 0;332 C5 2 0;333 F4 1 0;334 A#3 1 0;335 G#4 1 0;334 G#5 2 0;336 F#3 1 0;337 F#4 1 0;338 C#4 1 0;339 D#4 1 0;340 F4 1 0;341 F3 1 0;342 F#3 1 0;343 F#4 1 0;336 F5 8 0;344 C#4 1 0;344 G#5 2 0;344 C#6 2 0;345 D#4 1 0;346 F4 1 0;346 A#5 2 0;346 D#6 2 0;347 F#3 1 0;348 G#3 1 0;349 D#3 1 0;350 C4 1 0;348 G#5 4 0;348 F6 4 0;351 G#3 1 0;352 D#4 1 0;352 D#6 2 0;352 A#5 2 0;353 C4 1 0;354 C#6 1 0;354 G#5 1 0;354 G#4 1 0;355 D#6 1 0;355 G#3 1 0;355 A#5 1 0;356 C6 2 0;356 D#5 2 0;356 D#4 2 0;358 G#5 2 0;358 F5 2 0;358 C4 2 0;360 A#3 1 0;361 F3 1 0;362 C4 1 0;363 G#3 1 0;364 C#4 1 0;365 F3 1 0;366 B3 1 0;367 C#4 1 0;368 D#4 1 0;369 F#3 1 0;370 A#3 1 0;371 C#4 1 0;372 B3 1 0;373 F#3 1 0;374 C#4 1 0;360 F5 15 0;360 A#5 15 0;375 F#3 1 0;376 D#4 1 0;377 F#3 1 0;378 F#4 1 0;379 B3 1 0;380 G#4 1 0;381 B3 1 0;382 C#5 1 0;383 G#4 1 0;384 B3 1 0;384 D#6 1 0;384 D#5 1 0;385 F#3 1 0;385 D#6 2 0;385 D#5 2 0;386 C#4 1 0;387 F#3 1 0;388 D#4 1 0;387 D#6 2 0;387 D#5 2 0;389 D#6 1 0;389 F#3 1 0;389 D#5 1 0;390 B3 1 0;391 F#3 1 0;390 D#5 2 0;390 D#6 2 0;392 C#4 1 0;393 F#3 1 0;392 D#6 2 0;392 D#5 2 0;394 D#4 1 0;394 D#6 2 0;394 D#5 2 0;395 F#3 1 0;396 D#6 1 0;396 D#5 1 0;396 F#4 1 0;397 B3 1 0;398 G#4 1 0;397 D#5 2 0;397 D#6 2 0;399 C#4 1 0;399 D#6 2 0;399 D#5 2 0;400 C#5 1 0;401 G#4 1 0;401 D#6 1 0;401 D#5 1 0;402 B4 1 0;403 F#4 1 0;402 D#5 2 0;402 D#6 2 0;404 F#5 1 0;404 D#5 2 0;404 D#6 2 0;405 C#5 1 0;406 B5 1 0;406 D#5 2 0;406 D#6 2 0;407 F#5 1 0;408 B3 1 0;409 D#4 1 0;410 C#4 1 0;411 D#4 1 0;412 B3 1 0;414 B3 1 0;415 C#4 1 0;416 D#4 1 0;417 B4 1 0;418 F#4 1 0;420 B3 1 0;421 D#4 1 0;422 C#4 1 0;423 D#4 1 0;424 B3 1 0;426 B3 1 0;427 C#4 1 0;428 D#4 1 0;429 B4 1 0;430 F#4 1 0;432 F6 1 0;432 A#3 1 0;432.6600036621094 F#6 1 0;433 C#4 1 
0;433.3299865722656 G6 1 0;434.010009765625 C4 1 0;434.010009765625 C#6 2 0;434.010009765625 G#6 2 0;435.010009765625 C#4 1 0;436.010009765625 A#3 1 0;436.010009765625 C#6 2 0;436.010009765625 G#6 2 0;438.010009765625 A#3 1 0;439.010009765625 C4 1 0;438.010009765625 C6 2 0;438.010009765625 G6 2 0;440.010009765625 C#4 1 0;440.010009765625 C6 2 0;440.010009765625 G6 2 0;441.010009765625 G#4 1 0;442.010009765625 F4 1 0;442.010009765625 G6 2 0;442.010009765625 C6 2 0;444.010009765625 F6 1 0;444.6700134277344 F#6 1 0;444.010009765625 F3 2 0;445.3399963378906 G6 1 0;446.0199890136719 C#6 2 0;446.0199890136719 G#3 2 0;446.0199890136719 G#6 2 0;448.0199890136719 G#6 2 0;448.0199890136719 C#6 2 0;448.0199890136719 C4 2 0;450.0199890136719 D#4 2 0;450.0199890136719 C6 2 0;450.0199890136719 G#6 2 0;452.0199890136719 G6 1 0;452.0199890136719 A#5 1 0;452.0199890136719 C4 1 0;453.0199890136719 G#3 1 0;453.0199890136719 F6 1 0;453.0199890136719 G#5 1 0;454.0199890136719 D#4 1 0;454.0199890136719 D#6 2 0;454.0199890136719 G#5 2 0;455.0199890136719 C4 1 0;456.0199890136719 B3 1 0;457.0199890136719 D#4 1 0;458.0199890136719 C#4 1 0;459.0199890136719 D#4 1 0;460.0199890136719 B3 1 0;462.0199890136719 B3 1 0;463.0199890136719 C#4 1 0;464.0199890136719 D#4 1 0;465.0199890136719 B4 1 0;466.0199890136719 F#4 1 0;468.0199890136719 B3 1 0;469.0199890136719 D#4 1 0;456.0199890136719 A#5 15 0;456.0199890136719 F6 15 0;470.0199890136719 C#4 1 0;471.0199890136719 D#4 1 0;472.0199890136719 B3 1 0;474.0199890136719 B3 1 0;475.0199890136719 C#4 1 0;476.0199890136719 D#4 1 0;477.0199890136719 B4 1 0;478.0199890136719 F#4 1 0;480.0199890136719 A#3 1 0;480.0199890136719 F6 1 0;480.67999267578125 F#6 1 0;481.0199890136719 C#4 1 0;481.3500061035156 G6 1 0;482.0299987792969 C4 1 0;482.0299987792969 C#6 2 0;482.0299987792969 G#6 2 0;483.0299987792969 C#4 1 0;484.0299987792969 A#3 1 0;484.0299987792969 C#6 2 0;484.0299987792969 G#6 2 0;486.0299987792969 A#3 1 0;487.0299987792969 C4 1 0;486.0299987792969 C6 2 0;486.0299987792969 G6 2 0;488.0299987792969 C#4 1 0;488.0299987792969 G6 2 0;488.0299987792969 C6 2 0;489.0299987792969 G#4 1 0;490.0299987792969 F4 1 0;490.0299987792969 G6 2 0;490.0299987792969 C6 2 0;492.0299987792969 D#5 2 0;492.0299987792969 G#3 2 0;494.0299987792969 C6 2 0;494.0299987792969 D#3 2 0;496.0299987792969 F3 2 0;496.0299987792969 A#5 2 0;498.0299987792969 G#5 1 0;498.0299987792969 G#3 2 0;499.0299987792969 F#5 1 0;500.0299987792969 F5 2 0;500.0299987792969 C4 2 0;502.0299987792969 G#5 2 0;502.0299987792969 D#4 2 0;504.0299987792969 B3 1 0;505.0299987792969 D#4 1 0;506.0299987792969 C#4 1 0;507.0299987792969 D#4 1 0;504.0299987792969 A#5 4 0;508.0299987792969 B3 1 0;508.0299987792969 C#6 2 0;510.0299987792969 B3 1 0;511.0299987792969 C#4 1 0;510.0299987792969 D#6 2 0;512.030029296875 D#4 1 0;512.030029296875 C#6 2 0;513.030029296875 B4 1 0;514.030029296875 F#4 1 0;514.030029296875 D#6 2 0;516.030029296875 B3 1 0;517.030029296875 D#4 1 0;518.030029296875 C#4 1 0;516.030029296875 F6 4 0;519.030029296875 D#4 1 0;520.030029296875 B3 1 0;520.030029296875 D#6 2 0;522.030029296875 B3 1 0;522.030029296875 F#6 2 0;523.030029296875 C#4 1 0;524.030029296875 D#4 1 0;524.030029296875 F6 2 0;525.030029296875 B4 1 0;526.030029296875 F#4 1 0;526.030029296875 D#6 2 0;528.030029296875 A#3 1 0;529.030029296875 C#4 1 0;530.030029296875 C4 1 0;528.030029296875 F6 4 0;531.030029296875 C#4 1 0;532.030029296875 A#3 1 0;532.030029296875 F#6 2 0;534.030029296875 F6 1 0;534.030029296875 A#3 1 0;535.030029296875 C4 1 0;535.030029296875 
F#6 1 0;536.030029296875 C#4 1 0;536.030029296875 D#6 2 0;537.030029296875 G#4 1 0;538.030029296875 F4 1 0;538.030029296875 F6 2 0;540.030029296875 F3 2 0;540.030029296875 C#6 4 0;542.030029296875 G#3 2 0;544.030029296875 D#6 2 0;544.030029296875 C4 2 0;546.030029296875 C6 1 0;547.030029296875 C#6 1 0;546.030029296875 D#4 2 0;548.030029296875 C4 1 0;548.030029296875 A#5 2 0;549.030029296875 G#3 1 0;550.030029296875 D#4 1 0;551.030029296875 C4 1 0;550.030029296875 G#5 2 0;552.030029296875 B3 1 0;553.030029296875 D#4 1 0;554.030029296875 C#4 1 0;552.030029296875 F5 4 0;555.030029296875 D#4 1 0;556.030029296875 B3 1 0;556.030029296875 F#5 2 0;558.030029296875 B3 1 0;559.030029296875 C#4 1 0;560.030029296875 D#4 1 0;558.030029296875 G#5 4 0;561.030029296875 B4 1 0;562.030029296875 F#4 1 0;562.030029296875 A#5 2 0;564.030029296875 B3 1 0;565.030029296875 D#4 1 0;566.030029296875 C#4 1 0;564.030029296875 F5 4 0;567.030029296875 D#4 1 0;568.030029296875 B3 1 0;568.030029296875 F#5 2 0;570.030029296875 B3 1 0;571.030029296875 C#4 1 0;572.030029296875 D#4 1 0;570.030029296875 G#5 4 0;573.030029296875 B4 1 0;574.030029296875 F#4 1 0;574.030029296875 A#5 2 0;576.030029296875 A#3 1 0;576.030029296875 F5 1 0;576.030029296875 F6 1 0;576.6900024414062 F#6 1 0;577.030029296875 C#4 1 0;577.030029296875 F#5 1 0;577.3599853515625 G6 1 0;578.0399780273438 C4 1 0;579.0399780273438 C#4 1 0;578.0399780273438 C#6 2 0;578.0399780273438 G#6 2 0;580.0399780273438 A#3 1 0;580.0399780273438 G#6 2 0;580.0399780273438 C#6 2 0;582.0399780273438 A#3 1 0;582.0399780273438 C6 2 0;582.0399780273438 G6 2 0;583.0399780273438 C4 1 0;584.0399780273438 C#4 1 0;585.0399780273438 G#4 1 0;584.0399780273438 C6 2 0;584.0399780273438 G6 2 0;586.0399780273438 F4 1 0;586.0399780273438 C6 2 0;586.0399780273438 G6 2 0;588.0399780273438 F6 1 0;588.7000122070312 F#6 1 0;588.0399780273438 F3 2 0;589.3800048828125 G6 1 0;590.0499877929688 G#6 2 0;590.0499877929688 C#6 2 0;590.0499877929688 G#3 2 0;578.0399780273438 F5 15 0;592.0499877929688 G#6 2 0;592.0499877929688 C4 2 0;592.0499877929688 C#6 2 0;594.0499877929688 D#4 2 0;594.0499877929688 C6 2 0;594.0499877929688 G#6 2 0;596.0499877929688 C4 1 0;596.0499877929688 G6 1 0;596.0499877929688 A#5 1 0;597.0499877929688 G#3 1 0;597.0499877929688 F6 1 0;597.0499877929688 G#5 1 0;598.0499877929688 D#4 1 0;598.0499877929688 D#6 2 0;598.0499877929688 G#5 2 0;599.0499877929688 C4 1 0;600.0499877929688 B3 1 0;601.0499877929688 D#4 1 0;602.0499877929688 C#4 1 0;600.0499877929688 A#5 4 0;603.0499877929688 D#4 1 0;604.0499877929688 B3 1 0;604.0499877929688 C#6 2 0;606.0499877929688 B3 1 0;606.0499877929688 D#6 2 0;607.0499877929688 C#4 1 0;608.0499877929688 D#4 1 0;608.0499877929688 C#6 2 0;609.0499877929688 B4 1 0;610.0499877929688 F#4 1 0;610.0499877929688 D#6 2 0;612.0499877929688 B3 1 0;613.0499877929688 D#4 1 0;614.0499877929688 C#4 1 0;612.0499877929688 F6 4 0;615.0499877929688 D#4 1 0;616.0499877929688 B3 1 0;616.0499877929688 D#6 2 0;618.0499877929688 B3 1 0;618.0499877929688 F#6 2 0;619.0499877929688 C#4 1 0;620.0499877929688 D#4 1 0;621.0499877929688 B4 1 0;620.0499877929688 F6 2 0;622.0499877929688 F#4 1 0;622.0499877929688 D#6 2 0;624.0499877929688 A#3 1 0;625.0499877929688 C#4 1 0;626.0499877929688 C4 1 0;624.0499877929688 F6 4 0;627.0499877929688 C#4 1 0;628.0499877929688 A#3 1 0;628.0499877929688 F#6 2 0;630.0499877929688 A#3 1 0;630.0499877929688 F6 1 0;631.0499877929688 F#6 1 0;631.0499877929688 C4 1 0;632.0499877929688 C#4 1 0;633.0499877929688 G#4 1 0;632.0499877929688 D#6 2 
0;634.0499877929688 F4 1 0;634.0499877929688 F6 2 0;636.0499877929688 F3 2 0;636.0499877929688 C#6 4 0;638.0499877929688 G#3 2 0;640.0499877929688 C4 2 0;640.0499877929688 D#6 2 0;642.0499877929688 C6 1 0;642.0499877929688 D#4 2 0;643.0499877929688 C#6 1 0;644.0499877929688 C4 1 0;644.0499877929688 A#5 2 0;645.0499877929688 G#3 1 0;646.0499877929688 D#4 1 0;646.0499877929688 G#5 2 0;647.0499877929688 C4 1 0;648.0499877929688 B3 1 0;649.0499877929688 D#4 1 0;650.0499877929688 C#4 1 0;651.0499877929688 D#4 1 0;648.0499877929688 F5 4 0;652.0499877929688 B3 1 0;652.0499877929688 F#5 2 0;654.0499877929688 B3 1 0;655.0499877929688 C#4 1 0;656.0499877929688 D#4 1 0;654.0499877929688 G#5 4 0;657.0499877929688 B4 1 0;658.0499877929688 F#4 1 0;658.0499877929688 A#5 2 0;660.0499877929688 B3 1 0;661.0499877929688 D#4 1 0;662.0499877929688 C#4 1 0;660.0499877929688 F5 4 0;663.0499877929688 D#4 1 0;664.0499877929688 B3 1 0;664.0499877929688 F#5 2 0;666.0499877929688 B3 1 0;667.0499877929688 C#4 1 0;668.0499877929688 D#4 1 0;669.0499877929688 B4 1 0;666.0499877929688 G#5 4 0;670.0499877929688 F#4 1 0;670.0499877929688 A#5 2 0;672.0499877929688 A#3 1 0;672.0499877929688 F5 1 0;673.0499877929688 C#4 1 0;673.0499877929688 F#5 1 0;674.0499877929688 C4 1 0;675.0499877929688 C#4 1 0;676.0499877929688 A#3 1 0;678.0499877929688 A#3 1 0;679.0499877929688 C4 1 0;680.0499877929688 C#4 1 0;674.0499877929688 F5 8 0;681.0499877929688 G#4 1 0;682.0499877929688 F4 1 0;684.0499877929688 G#3 2 0;684.0499877929688 D#6 2 0;684.0499877929688 G#5 2 0;686.0499877929688 D#6 2 0;686.0499877929688 C7 2 0;686.0499877929688 D#3 2 0;688.0499877929688 F3 2 0;688.0499877929688 C#6 2 0;688.0499877929688 A#6 2 0;690.0499877929688 G#6 1 0;690.0499877929688 C6 1 0;691.0499877929688 F#6 1 0;691.0499877929688 A#5 1 0;690.0499877929688 G#3 2 0;692.0499877929688 G#5 2 0;692.0499877929688 F6 2 0;692.0499877929688 C4 2 0;694.0499877929688 F5 2 0;694.0499877929688 G#6 2 0;694.0499877929688 D#4 2 0;696.0499877929688 F#3 2 0;698.0499877929688 C4 1 0;699.0499877929688 C#4 1 0;700.0499877929688 A#3 1 0;701.0499877929688 F3 1 0;702.0499877929688 F#3 2 0;696.0499877929688 A#5 8 0;696.0499877929688 F#5 8 0;704.0499877929688 C4 1 0;704.0499877929688 F5 2 0;705.0499877929688 C#4 1 0;704.0499877929688 C5 2 0;706.0499877929688 A#3 1 0;706.0499877929688 D#5 2 0;706.0499877929688 G#5 2 0;707.0499877929688 D#4 1 0;708.0499877929688 G#3 2 0;710.0499877929688 D#3 1 0;708.0499877929688 F5 4 0;711.0499877929688 F3 1 0;708.0499877929688 A#5 4 0;712.0499877929688 G#3 1 0;713.0499877929688 F3 1 0;712.0499877929688 F5 2 0;712.0499877929688 C#6 2 0;714.0499877929688 F#5 1 0;714.0499877929688 D#6 1 0;714.0499877929688 D#3 2 0;715.0499877929688 C#6 1 0;715.0499877929688 F5 1 0;716.0499877929688 D#5 2 0;716.0499877929688 F3 2 0;716.0499877929688 C6 2 0;718.0499877929688 G#3 2 0;718.0499877929688 F5 2 0;718.0499877929688 C#6 2 0;720.0499877929688 A#3 1 0;721.0499877929688 A#4 1 0;722.0499877929688 D#4 1 0;723.0499877929688 F4 1 0;724.0499877929688 C#4 1 0;725.0499877929688 G#3 1 0;726.0499877929688 A#3 1 0;727.0499877929688 A#4 1 0;728.0499877929688 D#4 1 0;729.0499877929688 F4 1 0;730.0499877929688 C#4 1 0;731.0499877929688 G#3 1 0;732.0499877929688 A#3 1 0;733.0499877929688 A#4 1 0;734.0499877929688 D#4 1 0;720.0499877929688 F5 15 0;720.0499877929688 A#5 15 0;735.0499877929688 F4 1 0;736.0499877929688 C#4 1 0;737.0499877929688 F3 1 0;738.0499877929688 A#5 2 0;738.0499877929688 F5 2 0;738.0499877929688 G#3 2 0;740.0499877929688 D#3 2 0;740.0499877929688 C#6 2 
0;740.0499877929688 G#5 2 0;742.0499877929688 A#5 2 0;742.0499877929688 D#6 2 0;742.0499877929688 F3 2 0;744.0499877929688 D#3 2 0;746.0499877929688 G#3 1 0;747.0499877929688 A#3 1 0;748.0499877929688 F#3 1 0;749.0499877929688 C#3 1 0;744.0499877929688 G#5 8 0;750.0499877929688 D#3 2 0;744.0499877929688 F6 8 0;752.0499877929688 G#3 1 0;753.0499877929688 A#3 1 0;752.0499877929688 G#5 2 0;752.0499877929688 C#6 2 0;754.0499877929688 A#5 2 0;754.0499877929688 D#6 2 0;754.0499877929688 C4 2 0;756.0499877929688 F3 2 0;758.0499877929688 C4 1 0;756.0499877929688 G#5 4 0;756.0499877929688 F6 4 0;759.0499877929688 G#4 1 0;760.0499877929688 F4 1 0;760.0499877929688 D#6 2 0;761.0499877929688 C4 1 0;760.0499877929688 A#5 2 0;762.0499877929688 G#5 1 0;762.0499877929688 C#6 1 0;762.0499877929688 F3 1 0;763.0499877929688 A#5 1 0;763.0499877929688 D#6 1 0;763.0499877929688 F4 1 0;764.0499877929688 G#3 1 0;764.0499877929688 D#5 2 0;764.0499877929688 C6 2 0;765.0499877929688 F4 1 0;766.0499877929688 A#3 1 0;766.0499877929688 F5 2 0;766.0499877929688 G#5 2 0;767.0499877929688 G#4 1 0;768.0499877929688 F#3 1 0;769.0499877929688 F#4 1 0;770.0499877929688 C#4 1 0;771.0499877929688 D#4 1 0;772.0499877929688 F4 1 0;773.0499877929688 F3 1 0;774.0499877929688 F#3 1 0;775.0499877929688 F#4 1 0;768.0499877929688 F5 8 0;768.0499877929688 A#5 8 0;776.0499877929688 C#4 1 0;777.0499877929688 D#4 1 0;776.0499877929688 C#5 2 0;778.0499877929688 F4 1 0;778.0499877929688 D#5 2 0;779.0499877929688 F#3 1 0;780.0499877929688 F3 1 0;781.0499877929688 F4 1 0;782.0499877929688 C4 1 0;783.0499877929688 G#4 1 0;780.0499877929688 F5 4 0;784.0499877929688 F4 1 0;784.0499877929688 D#5 2 0;785.0499877929688 C4 1 0;786.0499877929688 F3 1 0;786.0499877929688 C#5 1 0;787.0499877929688 D#5 1 0;787.0499877929688 F4 1 0;788.0499877929688 G#3 1 0;788.0499877929688 C5 2 0;789.0499877929688 F4 1 0;790.0499877929688 A#3 1 0;791.0499877929688 G#4 1 0;790.0499877929688 G#5 2 0;792.0499877929688 F#3 1 0;793.0499877929688 F#4 1 0;794.0499877929688 C#4 1 0;795.0499877929688 D#4 1 0;796.0499877929688 F4 1 0;797.0499877929688 F3 1 0;798.0499877929688 F#3 1 0;792.0499877929688 F5 8 0;799.0499877929688 F#4 1 0;800.0499877929688 C#4 1 0;800.0499877929688 G#5 2 0;800.0499877929688 C#6 2 0;801.0499877929688 D#4 1 0;802.0499877929688 F4 1 0;803.0499877929688 F#3 1 0;802.0499877929688 D#6 2 0;802.0499877929688 A#5 2 0;804.0499877929688 G#3 1 0;805.0499877929688 D#3 1 0;806.0499877929688 C4 1 0;804.0499877929688 F6 4 0;804.0499877929688 G#5 4 0;807.0499877929688 G#3 1 0;808.0499877929688 D#4 1 0;809.0499877929688 C4 1 0;808.0499877929688 A#5 2 0;808.0499877929688 D#6 2 0;810.0499877929688 G#5 1 0;810.0499877929688 C#6 1 0;810.0499877929688 G#4 1 0;811.0499877929688 D#6 1 0;811.0499877929688 G#3 1 0;811.0499877929688 A#5 1 0;812.0499877929688 D#4 2 0;812.0499877929688 D#5 2 0;812.0499877929688 C6 2 0;814.0499877929688 G#5 2 0;814.0499877929688 F5 2 0;814.0499877929688 C4 2 0;816.0499877929688 F#3 1 0;817.0499877929688 F#4 1 0;818.0499877929688 C#4 1 0;819.0499877929688 D#4 1 0;820.0499877929688 F4 1 0;821.0499877929688 F3 1 0;822.0499877929688 F#3 1 0;816.0499877929688 A#5 8 0;816.0499877929688 F5 8 0;823.0499877929688 F#4 1 0;824.0499877929688 C#4 1 0;824.0499877929688 C#6 2 0;824.0499877929688 G#5 2 0;825.0499877929688 D#4 1 0;826.0499877929688 F4 1 0;826.0499877929688 A#5 2 0;826.0499877929688 D#6 2 0;827.0499877929688 F#3 1 0;828.0499877929688 F3 1 0;829.0499877929688 F4 1 0;830.0499877929688 C4 1 0;828.0499877929688 F6 4 0;828.0499877929688 G#5 4 
0;831.0499877929688 G#4 1 0;832.0499877929688 F4 1 0;833.0499877929688 C4 1 0;832.0499877929688 D#6 2 0;832.0499877929688 A#5 2 0;834.0499877929688 G#5 1 0;834.0499877929688 C#6 1 0;834.0499877929688 F3 1 0;835.0499877929688 A#5 1 0;835.0499877929688 F4 1 0;835.0499877929688 D#6 1 0;836.0499877929688 G#3 1 0;836.0499877929688 D#5 2 0;836.0499877929688 C6 2 0;837.0499877929688 F4 1 0;838.0499877929688 A#3 1 0;838.0499877929688 F5 2 0;839.0499877929688 G#4 1 0;838.0499877929688 G#5 2 0;840.0499877929688 A#3 1 0;841.0499877929688 A#4 1 0;842.0499877929688 D#4 1 0;843.0499877929688 F4 1 0;844.0499877929688 C#4 1 0;845.0499877929688 G#3 1 0;846.0499877929688 A#3 1 0;847.0499877929688 A#4 1 0;848.0499877929688 D#4 1 0;849.0499877929688 F4 1 0;850.0499877929688 C#4 1 0;851.0499877929688 G#3 1 0;852.0499877929688 A#3 1 0;840.0499877929688 F5 15 0;840.0499877929688 A#5 15 0;855.0499877929688 A#3 1 0;858.0499877929688 A#3 1 0;860.0499877929688 A#3 1 0;862.0499877929688 A#3 1 0'
"""
Find a piece of music on onlinesequencer.net, click Edit,
then select all notes with Ctrl+A and copy them with Ctrl+C.
Paste the copied string as shown above, after removing the
trailing ";:" and the leading "Online Sequencer:120233:"
prefix (the number is the sequence ID and varies per song).
"""
from machine import Pin
from time import sleep  # used by the playback loop below

# One buzzer on pin 0
mySong = music(song, pins=[Pin(0)])
# Four buzzers
# mySong = music(song, pins=[Pin(0), Pin(1), Pin(2), Pin(3)])

while True:
    print(mySong.tick())
    sleep(0.04)
| 1,581.236364
| 22,257
| 0.645824
|
fc59aded2c00d2ec02313c7310ed55055299528f
| 333
|
py
|
Python
|
api/simpleproxies_api/middleware.py
|
liobrdev/simpleproxies
|
84a0a301981efd5126e63e66b8de5d383658831d
|
[
"MIT"
] | null | null | null |
api/simpleproxies_api/middleware.py
|
liobrdev/simpleproxies
|
84a0a301981efd5126e63e66b8de5d383658831d
|
[
"MIT"
] | null | null | null |
api/simpleproxies_api/middleware.py
|
liobrdev/simpleproxies
|
84a0a301981efd5126e63e66b8de5d383658831d
|
[
"MIT"
] | null | null | null |
from django.utils.deprecation import MiddlewareMixin
from ipware import get_client_ip


class ClientIPMiddleware(MiddlewareMixin):
    """Resolve the client's IP with django-ipware and stash it on the request."""

    def process_request(self, request):
        client_ip, is_routable = get_client_ip(request)
        request.META['CLIENT_IP'] = client_ip
        request.META['CLIENT_IP_IS_ROUTABLE'] = is_routable
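# A minimal sketch of how this would be enabled in settings.py; the exact
# dotted path is an assumption based on this file living at
# api/simpleproxies_api/middleware.py:
#
#   MIDDLEWARE = [
#       # ...
#       'simpleproxies_api.middleware.ClientIPMiddleware',
#   ]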
| 33.3
| 59
| 0.765766
|
4732504d0f632143242c36aab4dec2e7a92b63ec
| 131
|
py
|
Python
|
pipeline/tools/__init__.py
|
juridics/brazilian-legal-text-benchmark
|
bfb38dc0f5090de548d2b5df39ec51aa6c5b8d13
|
[
"Unlicense"
] | 1
|
2022-02-24T12:35:46.000Z
|
2022-02-24T12:35:46.000Z
|
pipeline/tools/__init__.py
|
juridics/brazilian-legal-text-benchmark
|
bfb38dc0f5090de548d2b5df39ec51aa6c5b8d13
|
[
"Unlicense"
] | null | null | null |
pipeline/tools/__init__.py
|
juridics/brazilian-legal-text-benchmark
|
bfb38dc0f5090de548d2b5df39ec51aa6c5b8d13
|
[
"Unlicense"
] | null | null | null |
from .metrics import Metrics
from .logger import AppLogger
from .statistic import Statistic
from .threshold import ThresholdTester
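# Re-exporting here lets callers import the helpers from the package root,
# e.g. `from tools import Metrics, AppLogger` (the exact import path depends
# on how the `pipeline` directory is placed on sys.path).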
| 26.2
| 38
| 0.847328
|
b88812d311427e955c96eeb0c096683e11fff641
| 3,479
|
py
|
Python
|
t-test.py
|
NicholasDominic/The-NucleoNets
|
83530c2e3bf746c08e9045aeff82435fca5162e1
|
[
"MIT"
] | 1
|
2022-02-19T21:43:12.000Z
|
2022-02-19T21:43:12.000Z
|
t-test.py
|
NicholasDominic/The-NucleoNets
|
83530c2e3bf746c08e9045aeff82435fca5162e1
|
[
"MIT"
] | null | null | null |
t-test.py
|
NicholasDominic/The-NucleoNets
|
83530c2e3bf746c08e9045aeff82435fca5162e1
|
[
"MIT"
] | null | null | null |
# Null Hypothesis Significance Test (NHST)
from pandas import read_csv, merge, DataFrame
from os import listdir
from numpy import mean, std, sqrt
from scipy.stats import t, ttest_rel
from matplotlib.pyplot import boxplot, figure, subplots
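# The script below compares two regression models via the squared error of
# each prediction and runs paired-samples t-tests (scipy.stats.ttest_rel);
# rows are paired by matching y_true values, i.e. both models are assumed
# to have been evaluated on the same held-out observations.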
def read_and_get_squared_error(data, *args, **kwargs):
    a = read_csv(PATH + data, index_col=0)
    # per-observation squared error between ground truth and prediction
    a.loc[:, "squared_error"] = list(map(lambda x, y: (y - x) ** 2, a.y_true, a.y_predict))
    return a
def t_statistic(data_1, data_2, t_table_one_tailed, t_table_two_tailed, suffixes=('_1', '_2'), *args, **kwargs):
    data_1 = read_and_get_squared_error(data_1)
    data_2 = read_and_get_squared_error(data_2)
    # merging on y_true aligns both models' errors on the same observations
    df = merge(data_1, data_2, on="y_true", suffixes=suffixes)
    dof = len(df) - 1

    x = df["squared_error{}".format(suffixes[0])]  # OLS
    y = df["squared_error{}".format(suffixes[1])]  # NucleoNet

    for alt in ["two-sided", "less", "greater"]:
        if alt == "two-sided":
            sig_level = .025
        else:
            sig_level = .05
        t_test, p_value = ttest_rel(y, x, alternative=alt)
        print("t-stat: {:.5f}, p-value: {:.5f} ({}, validation: {})".format(t_test, p_value, alt, p_value < sig_level))

    # Kept for reference; note that `number_of_observation` was never defined.
    # return {
    #     "number_of_observation" : number_of_observation,
    #     "mean_1" : round(mean(df["squared_error{}".format(suffixes[0])]), 5),
    #     "variance_1" : round(std(df["squared_error{}".format(suffixes[0])])**2, 5),
    #     "mean_2" : round(mean(df["squared_error{}".format(suffixes[1])]), 5),
    #     "variance_2" : round(std(df["squared_error{}".format(suffixes[1])])**2, 5),
    #     "degree_of_freedom" : dof,
    #     "t_test" : round(t_test, 5),
    #     "p_value_one_tail" : round((t.cdf(-abs(t_test), dof)), 5),
    #     "t_table_one_tail" : t_table_one_tailed,
    #     "validation_one_tail" : t_test > t_table_one_tailed,
    #     "p_value_two_tail" : round(2*(1 - t.cdf(abs(t_test), dof)), 5),
    #     "t_table_two_tail" : t_table_two_tailed,
    #     "validation_two_tail" : t_test > t_table_two_tailed
    # }
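# Reading the output: with x taken from the first file and y from the second,
# ttest_rel(y, x, alternative="less") tests whether the second model's mean
# squared error is significantly LOWER; "greater" tests the opposite.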
PATH = "./result/prediction/"
data = [i for i in listdir(PATH) if i.endswith("_predict.csv")]
t_table_one_tailed, t_table_two_tailed = 1.6871, 2.0262
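# These look like the critical t values at alpha = .05 (one-tailed) and
# alpha/2 = .025 per tail (two-tailed) for roughly 37 degrees of freedom,
# consistent with the 38-observation dataset noted below.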
# note: Diff. data y_true (38 data): 3.188 / data y_true (104 data): 3.447
a = read_and_get_squared_error(data[1])
b = read_and_get_squared_error(data[2])
dfx = merge(a, b, on="y_true", suffixes=('_1', '_2'))
dataset = [dfx.y_true, read_csv(PATH + data[2], index_col=0).y_true] # dataset = [38_data, 104_data]
_, ax = subplots(figsize=(8, 5))
ax.set_title('Compare')
_ = ax.boxplot(dataset, showmeans=True)
# ============= NHST for Experiment 1 =============
# NucleoNet v1 vs. OLS
t_statistic(data[1], data[2], t_table_one_tailed, t_table_two_tailed)
# NucleoNet v1 vs. OLS with Elastic Net
t_statistic(data[0], data[2], t_table_one_tailed, t_table_two_tailed)
# ============= NHST for Experiment 2 =============
# NucleoNet v2 vs. OLS
t_statistic(data[1], data[3], t_table_one_tailed, t_table_two_tailed)
# NucleoNet v2 vs. OLS with Elastic Net
t_statistic(data[0], data[3], t_table_one_tailed, t_table_two_tailed)
# ============= NHST for Experiment 3 =============
# NucleoNet v3 vs. OLS
t_statistic(data[1], data[4], t_table_one_tailed, t_table_two_tailed)
# NucleoNet v3 vs. OLS with Elastic Net
t_statistic(data[0], data[4], t_table_one_tailed, t_table_two_tailed)
| 41.915663
| 120
| 0.63754
|
74c866a4c2749e21bdedafa5f17960a5867809db
| 21,493
|
py
|
Python
|
autoelective/loop.py
|
royess/PKUAutoElective
|
82bbe7e6089e80a214a380f47f19c7c919650a55
|
[
"MIT"
] | null | null | null |
autoelective/loop.py
|
royess/PKUAutoElective
|
82bbe7e6089e80a214a380f47f19c7c919650a55
|
[
"MIT"
] | null | null | null |
autoelective/loop.py
|
royess/PKUAutoElective
|
82bbe7e6089e80a214a380f47f19c7c919650a55
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: loop.py
# modified: 2019-09-11
import time
import random
from queue import Queue
from collections import deque
from itertools import combinations
from requests.compat import json
from requests.exceptions import RequestException
import numpy as np
from . import __version__, __date__
from .environ import Environ
from .config import AutoElectiveConfig
from .logger import ConsoleLogger, FileLogger
from .course import Course
from .captcha import CaptchaRecognizer
from .parser import get_tables, get_courses, get_courses_with_detail, get_sida
from .hook import _dump_request
from .iaaa import IAAAClient
from .elective import ElectiveClient
from .const import CAPTCHA_CACHE_DIR
from .exceptions import *
environ = Environ()
config = AutoElectiveConfig()
cout = ConsoleLogger("loop")
ferr = FileLogger("loop.error") # child logger of "loop", also echoed to the console
username = config.iaaa_id
password = config.iaaa_password
is_dual_degree = config.is_dual_degree
identity = config.identity
interval = config.refresh_interval
deviation = config.refresh_random_deviation
page = config.supply_cancel_page
iaaa_timeout = config.iaaa_client_timeout
elective_timeout = config.elective_client_timeout
login_loop_interval = config.login_loop_interval
elective_pool_size = config.elective_client_pool_size
config.check_identify(identity)
config.check_supply_cancel_page(page)
recognizer = CaptchaRecognizer()
electivePool = Queue(maxsize=elective_pool_size)
reloginPool = Queue(maxsize=elective_pool_size)
goals = environ.goals # let N = len(goals);
ignored = environ.ignored
mutexes = np.zeros(0, dtype=np.uint8) # uint8 [N][N];
delays = np.zeros(0, dtype=np.int_) # int [N];
killedElective = ElectiveClient(-1)
NO_DELAY = -1
class _ElectiveNeedsLogin(Exception):
pass
def _get_refresh_interval():
if deviation <= 0:
return interval
delta = (random.random() * 2 - 1) * deviation * interval
return interval + delta
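# Added illustration (hypothetical values): with interval = 10 and
# deviation = 0.2, delta is drawn uniformly from [-2.0, 2.0], so the loop
# sleeps for a uniform time in [8.0, 12.0] seconds.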
def _ignore_course(course, reason):
ignored[course.to_simplified()] = reason
def _add_error(e):
clz = e.__class__
name = clz.__name__
key = "[%s] %s" % (e.code, name) if hasattr(clz, "code") else name
environ.errors[key] += 1
def run_iaaa_loop():
elective = None
while True:
if elective is None:
elective = reloginPool.get()
if elective is killedElective:
cout.info("Quit IAAA loop")
return
environ.iaaa_loop += 1
cout.info("Try to login IAAA (client: %s)" % elective.id)
try:
iaaa = IAAAClient(timeout=iaaa_timeout) # not reusable
# request elective's home page to get cookies
r = iaaa.oauth_home()
r = iaaa.oauth_login(username, password)
try:
token = r.json()["token"]
except Exception as e:
ferr.error(e)
raise OperationFailedError(msg="Unable to parse IAAA token. response body: %s" % r.content)
elective.clear_cookies()
r = elective.sso_login(token)
if is_dual_degree:
sida = get_sida(r)
sttp = identity
referer = r.url
r = elective.sso_login_dual_degree(sida, sttp, referer)
cout.info("Login success (client: %s)" % elective.id)
electivePool.put_nowait(elective)
elective = None
except (ServerError, StatusCodeError) as e:
ferr.error(e)
cout.warning("ServerError/StatusCodeError encountered")
_add_error(e)
except OperationFailedError as e:
ferr.error(e)
cout.warning("OperationFailedError encountered")
_add_error(e)
except RequestException as e:
ferr.error(e)
cout.warning("RequestException encountered")
_add_error(e)
except IAAAIncorrectPasswordError as e:
cout.error(e)
_add_error(e)
raise e
except IAAAForbiddenError as e:
ferr.error(e)
_add_error(e)
raise e
except IAAAException as e:
ferr.error(e)
cout.warning("IAAAException encountered")
_add_error(e)
except CaughtCheatingError as e:
ferr.critical(e) # critical error
_add_error(e)
raise e
except ElectiveException as e:
ferr.error(e)
cout.warning("ElectiveException encountered")
_add_error(e)
except json.JSONDecodeError as e:
ferr.error(e)
cout.warning("JSONDecodeError encountered")
_add_error(e)
except KeyboardInterrupt as e:
raise e
except Exception as e:
ferr.exception(e)
_add_error(e)
raise e
finally:
t = login_loop_interval
cout.info("")
cout.info("IAAA login loop sleep %s s" % t)
cout.info("")
time.sleep(t)
def run_elective_loop():
elective = None
noWait = False
## load courses
cs = config.courses # OrderedDict
N = len(cs)
cid_cix = {} # { cid: cix }
for ix, (cid, c) in enumerate(cs.items()):
goals.append(c)
cid_cix[cid] = ix
## load mutex
ms = config.mutexes
mutexes.resize((N, N), refcheck=False)
for mid, m in ms.items():
ixs = []
for cid in m.cids:
if cid not in cs:
raise UserInputException("In 'mutex:%s', course %r is not defined" % (mid, cid))
ix = cid_cix[cid]
ixs.append(ix)
for ix1, ix2 in combinations(ixs, 2):
mutexes[ix1, ix2] = mutexes[ix2, ix1] = 1
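# Worked example (added; hypothetical config): with goals A, B, C at indices
# 0, 1, 2 and a single mutex group {A, C}, combinations([0, 2], 2) yields only
# (0, 2), so the symmetric matrix becomes
#   [[0, 0, 1],
#    [0, 0, 0],
#    [1, 0, 0]]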
## load delay
ds = config.delays
delays.resize(N, refcheck=False)
delays.fill(NO_DELAY)
for did, d in ds.items():
cid = d.cid
if cid not in cs:
raise UserInputException("In 'delay:%s', course %r is not defined" % (did, cid))
ix = cid_cix[cid]
delays[ix] = d.threshold
## setup elective pool
for ix in range(1, elective_pool_size + 1):
electivePool.put_nowait(ElectiveClient(id=ix, timeout=elective_timeout))
## print header
header = "# PKU Auto-Elective Tool v%s (%s) #" % (__version__, __date__)
line = "#" + "-" * (len(header) - 2) + "#"
cout.info(line)
cout.info(header)
cout.info(line)
cout.info("")
while True:
noWait = False
if elective is None:
elective = electivePool.get()
environ.elective_loop += 1
cout.info("")
cout.info("======== Loop %d ========" % environ.elective_loop)
cout.info("")
line = "-" * 30
## print current plans
current = [ c for c in goals if c not in ignored ]
if len(current) > 0:
cout.info("> Current tasks")
cout.info(line)
for ix, course in enumerate(current):
cout.info("%02d. %s" % (ix + 1, course))
cout.info(line)
cout.info("")
## print ignored course
if len(ignored) > 0:
cout.info("> Ignored tasks")
cout.info(line)
for ix, (course, reason) in enumerate(ignored.items()):
cout.info("%02d. %s %s" % (ix + 1, course, reason))
cout.info(line)
cout.info("")
## print mutex rules
if np.any(mutexes):
cout.info("> Mutex rules")
cout.info(line)
ixs = [ (ix1, ix2) for ix1, ix2 in np.argwhere( mutexes == 1 ) if ix1 < ix2 ]
for ix, (ix1, ix2) in enumerate(ixs):
cout.info("%02d. %s --x-- %s" % (ix + 1, goals[ix1], goals[ix2]))
cout.info(line)
cout.info("")
## print delay rules
if np.any( delays != NO_DELAY ):
cout.info("> Delay rules")
cout.info(line)
ds = [ (cix, threshold) for cix, threshold in enumerate(delays) if threshold != NO_DELAY ]
for ix, (cix, threshold) in enumerate(ds):
cout.info("%02d. %s --- %d" % (ix + 1, goals[cix], threshold))
cout.info(line)
cout.info("")
if len(current) == 0:
cout.info("No tasks")
cout.info("Quit elective loop")
reloginPool.put_nowait(killedElective) # kill signal
return
## print client info
cout.info("> Current client: %s, (qsize: %s)" % (elective.id, electivePool.qsize() + 1))
cout.info("")
try:
if not elective.hasLogined:
raise _ElectiveNeedsLogin # quit this loop
## check supply/cancel page
page_r = None
if page == 1:
cout.info("Get SupplyCancel page %s" % page)
r = page_r = elective.get_SupplyCancel()
tables = get_tables(r._tree)
elected = get_courses(tables[1])
plans = get_courses_with_detail(tables[0])
else:
#
# When refreshing a course page other than the first, the very first request
# may come back as an empty page.
#
# To reproduce:
# 1. Log in with the dual-degree identity and open page 2 of supply/cancel.
# 2. Log in with the major identity in the same browser.
# 3. Refresh the dual-degree supply/cancel page 2 and the empty page shows up.
#
# -----------------------------------------------
#
# A retry guard is added so that odd failures cannot cause infinite retries.
# Normally one attempt succeeds; to tolerate occasional errors, at most 3
# attempts are made here.
#
retry = 3
while True:
if retry == 0:
raise OperationFailedError(msg="unable to get normal Supplement page %s" % page)
cout.info("Get Supplement page %s" % page)
r = page_r = elective.get_supplement(page=page) # dual-degree page 2
tables = get_tables(r._tree)
try:
elected = get_courses(tables[1])
plans = get_courses_with_detail(tables[0])
except IndexError as e:
cout.warning("IndexError encountered")
cout.info("Get SupplyCancel first to prevent empty table returned")
_ = elective.get_SupplyCancel() # on an empty page, request the SupplyCancel home once; afterwards refreshing works
else:
break
finally:
retry -= 1
## check available courses
cout.info("Get available courses")
tasks = [] # [(ix, course)]
for ix, c in enumerate(goals):
if c in ignored:
continue
elif c in elected:
cout.info("%s is elected, ignored" % c)
_ignore_course(c, "Elected")
for (mix, ) in np.argwhere( mutexes[ix,:] == 1 ):
mc = goals[mix]
if mc in ignored:
continue
cout.info("%s is simultaneously ignored by mutex rules" % mc)
_ignore_course(mc, "Mutex rules")
else:
for c0 in plans: # c0 has detail
if c0 == c:
if c0.is_available():
delay = delays[ix]
if delay != NO_DELAY and c0.remaining_quota > delay:
cout.info("%s hasn't reached the delay threshold %d, skip" % (c0, delay))
else:
tasks.append((ix, c0))
cout.info("%s is AVAILABLE now !" % c0)
break
else:
raise UserInputException("%s is not in your course plan, please check your config." % c)
tasks = deque([ (ix, c) for ix, c in tasks if c not in ignored ]) # filter again and change to deque
## elect available courses
if len(tasks) == 0:
cout.info("No course available")
continue
elected = [] # cache elected courses dynamically from `get_ElectSupplement`
while len(tasks) > 0:
ix, course = tasks.popleft()
is_mutex = False
# dynamically filter course by mutex rules
for (mix, ) in np.argwhere( mutexes[ix,:] == 1 ):
mc = goals[mix]
if mc in elected: # ignore course in advanced
is_mutex = True
cout.info("%s --x-- %s" % (course, mc))
cout.info("%s is ignored by mutex rules in advance" % course)
_ignore_course(course, "Mutex rules")
break
if is_mutex:
continue
cout.info("Try to elect %s" % course)
## validate captcha first
while True:
cout.info("Fetch a captcha")
r = elective.get_DrawServlet()
captcha = recognizer.recognize(r.content)
cout.info("Recognition result: %s" % captcha.code)
r = elective.get_Validate(captcha.code)
try:
res = r.json()["valid"] # the server may return an error page instead of JSON ...
except Exception as e:
ferr.error(e)
raise OperationFailedError(msg="Unable to validate captcha")
if res == "2":
cout.info("Validation passed")
break
elif res == "0":
cout.info("Validation failed")
captcha.save(CAPTCHA_CACHE_DIR)
cout.info("Save %s to %s" % (captcha, CAPTCHA_CACHE_DIR))
cout.info("Try again")
else:
cout.warning("Unknown validation result: %s" % res)
## try to elect
try:
r = elective.get_ElectSupplement(course.href)
except ElectionRepeatedError as e:
ferr.error(e)
cout.warning("ElectionRepeatedError encountered")
_ignore_course(course, "Repeated")
_add_error(e)
except TimeConflictError as e:
ferr.error(e)
cout.warning("TimeConflictError encountered")
_ignore_course(course, "Time conflict")
_add_error(e)
except ExamTimeConflictError as e:
ferr.error(e)
cout.warning("ExamTimeConflictError encountered")
_ignore_course(course, "Exam time conflict")
_add_error(e)
except ElectionPermissionError as e:
ferr.error(e)
cout.warning("ElectionPermissionError encountered")
_ignore_course(course, "Permission required")
_add_error(e)
except CreditsLimitedError as e:
ferr.error(e)
cout.warning("CreditsLimitedError encountered")
_ignore_course(course, "Credits limited")
_add_error(e)
except MutexCourseError as e:
ferr.error(e)
cout.warning("MutexCourseError encountered")
_ignore_course(course, "Mutual exclusive")
_add_error(e)
except MultiEnglishCourseError as e:
ferr.error(e)
cout.warning("MultiEnglishCourseError encountered")
_ignore_course(course, "Multi English course")
_add_error(e)
except MultiPECourseError as e:
ferr.error(e)
cout.warning("MultiPECourseError encountered")
_ignore_course(course, "Multi PE course")
_add_error(e)
except ElectionFailedError as e:
ferr.error(e)
cout.warning("ElectionFailedError encountered") # 具体原因不明,且不能马上重试
_add_error(e)
except QuotaLimitedError as e:
ferr.error(e)
# the elective site may return abnormal data: a course actually at 180/180 can be reported as 180/0, and electing it then raises this error
if course.used_quota == 0:
cout.warning("Abnormal status of %s, a bug of 'elective.pku.edu.cn' found" % course)
else:
ferr.critical("Unexcepted behaviour") # 没有理由运行到这里
_add_error(e)
except ElectionSuccess as e:
# do not add to ignored here; the next round decides based on the actual election result returned by the site
cout.info("%s is ELECTED !" % course)
# --------------------------------------------------------------------------
# Issue #25
# --------------------------------------------------------------------------
# However, elected is updated dynamically: if several courses are electable in
# the same round and, per the mutex rules, a lower-priority course conflicts
# with a just-elected higher-priority course, then when the lower-priority
# course's request comes up it is ignored early based on this updated elected
# (instead of waiting until the start of the next loop round).
# --------------------------------------------------------------------------
r = e.response # get response from error ... a bit ugly
tables = get_tables(r._tree)
# use clear() + extend() instead of op `=` to ensure `id(elected)` doesn't change
elected.clear()
elected.extend(get_courses(tables[1]))
except RuntimeError as e:
ferr.critical(e)
ferr.critical("RuntimeError with Course(name=%r, class_no=%d, school=%r, status=%s, href=%r)" % (
course.name, course.class_no, course.school, course.status, course.href))
# use this private function of 'hook.py' to dump the response from `get_SupplyCancel` or `get_supplement`
file = _dump_request(page_r)
ferr.critical("Dump response from 'get_SupplyCancel / get_supplement' to %s" % file)
raise e
except Exception as e:
raise e # don't increase error count here
except UserInputException as e:
cout.error(e)
_add_error(e)
raise e
except (ServerError, StatusCodeError) as e:
ferr.error(e)
cout.warning("ServerError/StatusCodeError encountered")
_add_error(e)
except OperationFailedError as e:
ferr.error(e)
cout.warning("OperationFailedError encountered")
_add_error(e)
except RequestException as e:
ferr.error(e)
cout.warning("RequestException encountered")
_add_error(e)
except IAAAException as e:
ferr.error(e)
cout.warning("IAAAException encountered")
_add_error(e)
except _ElectiveNeedsLogin as e:
cout.info("client: %s needs Login" % elective.id)
reloginPool.put_nowait(elective)
elective = None
noWait = True
except (SessionExpiredError, InvalidTokenError, NoAuthInfoError, SharedSessionError) as e:
ferr.error(e)
_add_error(e)
cout.info("client: %s needs relogin" % elective.id)
reloginPool.put_nowait(elective)
elective = None
noWait = True
except CaughtCheatingError as e:
ferr.critical(e) # critical error !
_add_error(e)
raise e
except SystemException as e:
ferr.error(e)
cout.warning("SystemException encountered")
_add_error(e)
except TipsException as e:
ferr.error(e)
cout.warning("TipsException encountered")
_add_error(e)
except OperationTimeoutError as e:
ferr.error(e)
cout.warning("OperationTimeoutError encountered")
_add_error(e)
except json.JSONDecodeError as e:
ferr.error(e)
cout.warning("JSONDecodeError encountered")
_add_error(e)
except KeyboardInterrupt as e:
raise e
except Exception as e:
ferr.exception(e)
_add_error(e)
raise e
finally:
if elective is not None: # change elective client
electivePool.put_nowait(elective)
elective = None
if noWait:
cout.info("")
cout.info("======== END Loop %d ========" % environ.elective_loop)
cout.info("")
else:
t = _get_refresh_interval()
cout.info("")
cout.info("======== END Loop %d ========" % environ.elective_loop)
cout.info("Main loop sleep %s s" % t)
cout.info("")
time.sleep(t)
| 33.582813
| 125
| 0.512772
|
65dcc391d1c05b0e123f92f31201e536b4a472e1
| 8,312
|
py
|
Python
|
coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py
|
tonybove-apple/coremltools
|
22a8877beec7bad136ba5612d5aacd8e323ecdfc
|
[
"BSD-3-Clause"
] | 2,740
|
2017-10-03T23:19:01.000Z
|
2022-03-30T15:16:39.000Z
|
coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py
|
tonybove-apple/coremltools
|
22a8877beec7bad136ba5612d5aacd8e323ecdfc
|
[
"BSD-3-Clause"
] | 1,057
|
2017-10-05T22:47:01.000Z
|
2022-03-31T23:51:15.000Z
|
coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py
|
tonybove-apple/coremltools
|
22a8877beec7bad136ba5612d5aacd8e323ecdfc
|
[
"BSD-3-Clause"
] | 510
|
2017-10-04T19:22:28.000Z
|
2022-03-31T12:16:52.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import numpy as np
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import register_generic_pass
"""
Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers.
The scale const can be a single number (scalar) or a vector with a broadcastable shape;
for instance, if the output of the conv/deconv layer is (B, Cout, H, W),
consts of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed.
Given:
%2 = conv(%1)
...
%3 = mul(%2, constant) # where constant is the scale constant
...
Result:
%3 = conv(%1)
...
"""
arbitrary_cin = 5
arbitrary_cout = 8
arbitrary_scalar = 5
np.random.seed()
arbitrary_input = (3, arbitrary_cin, 224, 224)
arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10)
@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)])
def conv_scale_mul(x):
conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv")
mul = mb.mul(x=conv, y=arbitrary_scalar, name="scale")
return mul
@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)])
def conv_transpose_scale_mul(x):
conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv")
mul = mb.mul(x=conv, y=arbitrary_scalar, name="scale")
return mul
@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)])
def conv_scale_div(x):
conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv")
real_div = mb.real_div(x=conv, y=arbitrary_scalar, name="scale")
return real_div
@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)])
def conv_transpose_scale_div(x):
conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv")
real_div = mb.real_div(x=conv, y=arbitrary_scalar, name="scale")
return real_div
def _cin_cout(pattern):
# D_in denotes the spatial dimensions for conv kernel weight
# for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
# for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
is_deconv = pattern.conv.op_type == "conv_transpose"
groups = pattern.conv.groups.val
conv_weight = pattern.conv.weight.val
if is_deconv:
Cout = conv_weight.shape[1] * groups
Cin = conv_weight.shape[0]
else:
Cout = conv_weight.shape[0]
Cin = conv_weight.shape[1] * groups
return Cin, Cout
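# Example (added; hypothetical shapes): with groups = 2 and
# conv_weight.shape == (16, 4, 3, 3):
#   conv:            Cout = 16, Cin = 4 * 2 = 8
#   conv_transpose:  Cin = 16,  Cout = 4 * 2 = 8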
def _is_scalar(pattern):
# for the scalar case, the scalar can be either
# 1. a python int/float
# 2. a 0d numpy array
# 3. a 1d numpy array with shape (1,)
scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y
scale = scale_var.val
is_scalar = True
if isinstance(scale, np.ndarray):
if scale.shape == ():
scale = scale.tolist()
elif scale.shape == (1,):
scale = scale[0]
else:
is_scalar = False
return is_scalar
def var_constraints(pattern):
passed = True
passed = passed and (pattern.scale.x.val is not None or pattern.scale.y.val is not None)
passed = passed and pattern.conv.weight.val is not None
is_scalar = _is_scalar(pattern)
Cin, Cout = _cin_cout(pattern)
scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y
scale = scale_var.val
# for the vector scale case, check if the shape is broadcastable
if not is_scalar:
conv_weight = pattern.conv.weight.val
passed = passed and (
np.prod(scale.shape) == Cout
or (len(scale.shape) == len(conv_weight.shape) and scale.shape[1] == Cout)
or (len(scale.shape) == len(conv_weight.shape) - 1 and scale.shape[0] == Cout)
)
return passed
def transform_pattern(pattern):
# get the scale
scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y
scale = scale_var.val
is_scalar = _is_scalar(pattern)
# get weight and bias and groups from conv layer
conv_weight = pattern.conv.weight.val
conv_bias = pattern.conv.bias
groups = pattern.conv.groups.val
# get type of the conv layer
is_deconv = pattern.conv.op_type == "conv_transpose"
is_conv_1d = len(conv_weight.shape) == 3
Cin, Cout = _cin_cout(pattern)
# transform the scale to 1./scale for the real_div case
if pattern.scale.op_type == "real_div":
scale = 1.0 / scale
# get the type of the conv weight
conv_weight_type = conv_weight.dtype
# create bias for conv if not exist
if conv_bias is None:
conv_bias = np.zeros(Cout)
else:
conv_bias = conv_bias.val
conv_bias = conv_bias.astype(conv_weight_type)
# get the original shape of weight and bias
origin_weight_shape = conv_weight.shape
origin_bias_shape = conv_bias.shape
# update the weight/bias for conv layer
if is_scalar:
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
else:
scale = np.reshape(scale, (Cout,))
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = []
if is_deconv:
conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))
for i in range(Cout):
_conv_weight = conv_weight[i] * scale[i]
new_conv_weight.append(_conv_weight)
new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
if is_deconv:
new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
# make sure the updated weight and bias have the same shape as the original ones
assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass."
assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass."
# create a new conv op with the new weight, bias value, copying rest of the attributes
out_name = pattern.scale.outputs[0].name
conv_kargs = {
"weight": new_conv_weight,
"bias": new_conv_bias,
"name": out_name,
"before_op": pattern.conv,
}
for k, v in pattern.conv.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
pattern.scale.enclosing_block.replace_uses_of_var_after_op(
anchor_op=pattern.scale, old_var=pattern.scale.outputs[0], new_var=x
)
# Remove all the ops at once
pattern.block.remove_ops(pattern.op_list())
if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
register_generic_pass(
ops_arrangement=conv_scale_mul,
var_constraints=var_constraints,
transform_pattern=transform_pattern,
pass_name="fuse_conv_scale",
namespace="common",
)
register_generic_pass(
ops_arrangement=conv_transpose_scale_mul,
var_constraints=var_constraints,
transform_pattern=transform_pattern,
pass_name="fuse_conv_scale",
namespace="common",
)
register_generic_pass(
ops_arrangement=conv_scale_div,
var_constraints=var_constraints,
transform_pattern=transform_pattern,
pass_name="fuse_conv_scale",
namespace="common",
)
register_generic_pass(
ops_arrangement=conv_transpose_scale_div,
var_constraints=var_constraints,
transform_pattern=transform_pattern,
pass_name="fuse_conv_scale",
namespace="common",
)
| 34.065574
| 140
| 0.67962
|
2c932938dd9321c9864716585a0f7bd206dcd499
| 4,778
|
py
|
Python
|
examples/pes-1/prep_molpro.py
|
cbannwarth/aimsprop
|
9efd317f9d1e8f66e33b7a468845d5ace3e1852d
|
[
"MIT"
] | null | null | null |
examples/pes-1/prep_molpro.py
|
cbannwarth/aimsprop
|
9efd317f9d1e8f66e33b7a468845d5ace3e1852d
|
[
"MIT"
] | null | null | null |
examples/pes-1/prep_molpro.py
|
cbannwarth/aimsprop
|
9efd317f9d1e8f66e33b7a468845d5ace3e1852d
|
[
"MIT"
] | null | null | null |
import os
import re
import sys
import numpy as np
import aimsprop.atom_data as atom_data
import aimsprop.manage_xyz as manage_xyz
# TODO: make more invariant to filename (sbatch.sh)
# run molpro calculations to obtain ionization potential information
def prep_molpro(
opt_in,
):
"""Runs molpro calculations from template files and traj object
Params:
opt_in - [dict], options to override defaults
"""
options = {
# all paths must be absolute!
"neutral_input": None, # molpro input file for neutral
"cation_input": None, # molpro input file for cation
"submit_input": None, # sbatch file (runs neutral & cation in same job)
"out_dir": None, # dir for output
"submit": False, # whether to submit or just prep the job
"cation_states": None, # dict cation states corresponding to frame states
"traj": None, # traj object to run (all frames)
"neutral_input2": "neutral.dat", # modified molpro input file for neutral (must match sbatch.sh
"cation_input2": "cation.dat", # modified molpro input file for cation
"submit_input2": "sbatch.sh", # modified input script
"njobs": 25, # modified input script
}
for key, val in list(opt_in.items()):
options[key] = val
# TODO: throw error messages for invalid combination of options
# override options
neutral_input = options["neutral_input"]
cation_input = options["cation_input"]
submit_input = options["submit_input"]
neutral_input2 = options["neutral_input2"]
cation_input2 = options["cation_input2"]
submit_input2 = options["submit_input2"]
out_dir = options["out_dir"]
cation_states = options["cation_states"]
submit = options["submit"]
traj = options["traj"]
njobs = options["njobs"]
frames_per_job = len(traj.frames) // njobs # integer division: the result is used as a slice index
nframes = len(traj.frames)
for job in range(njobs + 1):
os.chdir(out_dir)
os.system("mkdir %02djob" % job)
os.system("cp %s %02djob" % (submit_input, job))
os.system("cp run_molpro.py %02djob" % job)
os.chdir("%02djob" % job)
ind1 = frames_per_job * job
ind2 = frames_per_job * (job + 1)
if job == njobs:
ind2 = nframes
# update submission script
update_sbatch("%d" % job, submit_input2, ind1=ind1, ind2=ind2)
for ind, frame in enumerate(traj.frames[ind1:ind2]):
# set up directory
os.system("mkdir %04d" % (ind + ind1))
os.chdir("%04d" % (ind + ind1))
# modify neutral molpro params
symbols = [atom_data.atom_symbol_table[N] for N in frame.N]
insert_geom(frame.xyz, symbols, neutral_input, neutral_input2)
update_state(frame.I, neutral_input2)
write_xyz(frame, symbols, "geom.xyz")
# modify cation molpro params
symbols = [atom_data.atom_symbol_table[N] for N in frame.N]
insert_geom(frame.xyz, symbols, cation_input, cation_input2)
cI = cation_states[frame.I]
update_state(cI, cation_input2)
# create file to identify frame with job
os.system(
"echo 'labels: %4d %4d\nt: %12.6f' > %s"
% (frame.label[0], frame.label[1], frame.t, "%04d_ID.txt" % ind)
)
# return to working directory
os.chdir("../")
# if requested, submit the job
if submit:
os.system("sbatch %s" % submit_input2)
os.chdir("../")
def insert_geom(
xyz,
symbols,
molpro_in,
molpro_out,
):
lines = open(molpro_in).readlines()
fh = open(molpro_out, "w")
for line in lines:
mobj = re.match(r"\s*geometry=\{\s*", line)
fh.write(line)
if mobj:
for ind, atom in enumerate(xyz):
fh.write(
"%6s %12.6f %12.6f %12.6f\n"
% (symbols[ind], atom[0], atom[1], atom[2])
)
def update_state(
I,
molpro_in,
):
# Note: molpro is 1 based in state indexing (1 = groundstate, 2 = first excited state)
# Note: fms is also 1 based in state indexing
os.system('sed -i "s/xstate/%s/g" %s' % (I, molpro_in))
def update_sbatch(
new_txt,
sbatch_in,
ind1,
ind2,
):
os.system('sed -i "s/ID/%s/g" %s' % (new_txt, sbatch_in))
os.system('sed -i "s/ind1/%s/g" %s' % (ind1, sbatch_in))
os.system('sed -i "s/ind2/%s/g" %s' % (ind2, sbatch_in))
def write_xyz(
frame,
symbols,
filename,
):
geom = []
for ind, atom in enumerate(frame.xyz):
geom.append((symbols[ind], atom[0], atom[1], atom[2]))
manage_xyz.write_xyz(filename, geom)
| 30.433121
| 104
| 0.595856
|
d3cd0d33673aa649786f1b4d9c8c94a842458964
| 3,266
|
py
|
Python
|
data_science_demo1.py
|
djrlj694/Python-Demo
|
b97a98bbb828ea60c383bb20888d647cad047075
|
[
"Unlicense"
] | null | null | null |
data_science_demo1.py
|
djrlj694/Python-Demo
|
b97a98bbb828ea60c383bb20888d647cad047075
|
[
"Unlicense"
] | null | null | null |
data_science_demo1.py
|
djrlj694/Python-Demo
|
b97a98bbb828ea60c383bb20888d647cad047075
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cx_Oracle
import impala.dbapi
#from impala.dbapi import connect
from impala.util import as_pandas
# Convenience classes
class Database(object):
def __init__(self):
""" The constructor for the class 'Database'.
Args:
N/A
Returns (Database):
An instance of 'Database'.
"""
self.platform = input("Enter data platform: ").upper()
if self.platform in ['HADOOP', 'HIVE', 'IMPALA']:
self.host = 'DNS_HOSTNAME_PLACEHOLDER'
self.port = 10000
elif self.platform == 'ORACLE':
self.host = 'DNS_HOSTNAME_PLACEHOLDER'
self.port = 1521
def connect(self, srvc):
""" Returns a database connection object.
Args:
srvc (str): A database service name (Oracle only).
Returns (Connection):
A database connection object.
"""
person = Person()
if self.platform in ['HADOOP', 'HIVE', 'IMPALA']:
return impala.dbapi.connect(host=self.host,
port=self.port,
auth_mechanism='PLAIN',
user=person.user,
password=person.password)
elif self.platform == 'ORACLE':
return cx_Oracle.connect("%s/%s@%s/%s" % (person.user, person.password, self.host, srvc))
class Person(object):
def __init__(self):
""" The constructor for the class 'Person'.
Args:
N/A
Returns (Person):
An instance of 'Person'.
"""
self.user = input("Enter username: ")
self.password = input("Enter password: ")
def main():
# Load database settings.
db = Database()
# Specify query.
srce_db = input("Enter database name -- e.g. 'EDWSTATS': ")
srce_tbl = input("Enter table name -- e.g. 'L_QC_COLUMN_RESULT': ")
query_template = '''
SELECT COUNT(*) AS row_count
FROM {}.{}
'''
query = query_template.format(srce_db, srce_tbl)
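# Note (added): interpolating identifiers like this is only safe for trusted
# input; values should go through DB-API binds instead, e.g. a hypothetical
# cx_Oracle positional bind:
#   cur.execute("SELECT * FROM some_table WHERE id = :1", [42])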
# Connect to database.
conn = db.connect('ds01')
# Execute query.
cur = conn.cursor()
cur.execute(query)
conn.commit()
# Convert query results to data frame.
df = as_pandas(cur)
# Disconnect from database.
conn.close()
# Demo some basic data frame operations.
# For more info, see:
# http://www.datadependence.com/2016/05/scientific-python-pandas/
result1 = df.head(1) # Show first row.
result2 = df.head(2) # Show first 2 rows.
result3 = df.tail(1) # Show last row.
result4 = len(df) # Show # of rows.
result5 = df.describe() # Show data frame summary stats.
print("df.head(1) = {}".format(result1))
print("df.head(2) = {}".format(result2))
print("df.tail(1) = {}".format(result3))
print("len(df) = {}".format(result4))
print("df.describe = {}".format(result5))
# NOTE: For unit testing purposes, execute as self-test only if being run as a top-level script.
if __name__ == "__main__":
main()
| 26.552846
| 101
| 0.543172
|
42b58a194be46ca628faacad3d0cebe522361215
| 2,505
|
py
|
Python
|
src/facebook.py
|
peicap/shadowads
|
d49fa962013e6cc136a24e7720f0c97bdb40cc63
|
[
"MIT"
] | null | null | null |
src/facebook.py
|
peicap/shadowads
|
d49fa962013e6cc136a24e7720f0c97bdb40cc63
|
[
"MIT"
] | null | null | null |
src/facebook.py
|
peicap/shadowads
|
d49fa962013e6cc136a24e7720f0c97bdb40cc63
|
[
"MIT"
] | null | null | null |
import time
from random import randint
from bs4 import BeautifulSoup
import re
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
rootUrl = 'https://mbasic.facebook.com'
groups = []
message = """PEIADS MARKETING TEAM
- NO AGE LIMIT
- NO DEPOSITS
- EARN MONEY AT HOME
If you like posting content and answering questions then this is the job for you!
You will be earning 250PHP by posting, answering, sharing contents and surveys.
Minimun payout is 250PHP via Paypal.
Visit the page to get an invitation!
http://peidev.top/blog/2017/10/13/how-to-earn-money-online-doing-surveys-in-the-philippines.html"""
file = open('./output/fb-groups.txt', 'w')
# start
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 10)
driver.implicitly_wait(30)
driver.set_window_size(400, 600)
driver.get(rootUrl)
print('Waiting for login elements')
wait.until(EC.presence_of_element_located(
(By.XPATH, '//input[@type="password"]')))
print('Login Available')
# Fix (added): `username` and `password` were never defined in this script,
# so prompt for them here before filling in the login form.
username = input('Facebook email: ')
password = input('Facebook password: ')
inputUsername = driver.find_element_by_id('m_login_email')
inputPassword = driver.find_element_by_xpath('//input[@type="password"]')
inputUsername.send_keys(username)
inputPassword.send_keys(password)
inputPassword.send_keys(Keys.RETURN)
print('Successfully logged in.')
driver.get('https://mbasic.facebook.com/groups/?seemore&refid=27')
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
for tag in soup.find_all('a', href=re.compile(r'groups/\d')):
print('saving in fb-groups.txt: ' + tag.text)
groups.append(rootUrl + tag['href'])
# time.sleep(0.05)
for group in groups:
# entries in `groups` already include rootUrl (see the append above)
file.write("%s\n" % group)
file.close()
print('Successfully wrote all group links to fb-groups.txt')
for group in groups:
try:
driver.get(group)
wait.until(EC.presence_of_element_located(
(By.XPATH, '//textarea[@name="xc_message"]')))
textarea = driver.find_element_by_xpath('//textarea[@name="xc_message"]')
textarea.send_keys(message)
print('YES')
except NoSuchElementException:
print('No text-area found skipping...')
continue
| 31.708861
| 99
| 0.748902
|
d211818a602b24345604d1e2a4be925908a0a64e
| 6,814
|
py
|
Python
|
django_filters/rest_framework/backends.py
|
kimbriancanavan/django-filter
|
3e1f32a8c9bbc49d40fd6aa57471b8b6eb1d32c6
|
[
"BSD-3-Clause"
] | null | null | null |
django_filters/rest_framework/backends.py
|
kimbriancanavan/django-filter
|
3e1f32a8c9bbc49d40fd6aa57471b8b6eb1d32c6
|
[
"BSD-3-Clause"
] | null | null | null |
django_filters/rest_framework/backends.py
|
kimbriancanavan/django-filter
|
3e1f32a8c9bbc49d40fd6aa57471b8b6eb1d32c6
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from django import forms
from django.template import loader
from django.utils.deprecation import RenameMethodsBase
from .. import compat, utils, fields
from . import filters, filterset
# TODO: remove metaclass in 2.1
class RenameAttributes(utils.RenameAttributesBase, RenameMethodsBase):
renamed_attributes = (
('default_filter_set', 'filterset_base', utils.MigrationNotice),
)
renamed_methods = (
('get_filter_class', 'get_filterset_class', utils.MigrationNotice),
)
class DjangoFilterBackend(metaclass=RenameAttributes):
filterset_base = filterset.FilterSet
raise_exception = True
@property
def template(self):
if compat.is_crispy():
return 'django_filters/rest_framework/crispy_form.html'
return 'django_filters/rest_framework/form.html'
def get_filterset(self, request, queryset, view):
filterset_class = self.get_filterset_class(view, queryset)
if filterset_class is None:
return None
kwargs = self.get_filterset_kwargs(request, queryset, view)
return filterset_class(**kwargs)
def get_filterset_class(self, view, queryset=None):
"""
Return the `FilterSet` class used to filter the queryset.
"""
filterset_class = getattr(view, 'filterset_class', None)
filterset_fields = getattr(view, 'filterset_fields', None)
# TODO: remove deprecation shim in 2.1
if filterset_class is None and hasattr(view, 'filter_class'):
utils.deprecate(
"`%s.filter_class` attribute should be renamed `filterset_class`."
% view.__class__.__name__)
filterset_class = getattr(view, 'filter_class', None)
# TODO: remove deprecation shim in 2.1
if filterset_fields is None and hasattr(view, 'filter_fields'):
utils.deprecate(
"`%s.filter_fields` attribute should be renamed `filterset_fields`."
% view.__class__.__name__)
filterset_fields = getattr(view, 'filter_fields', None)
if filterset_class:
filterset_model = filterset_class._meta.model
# FilterSets do not need to specify a Meta class
if filterset_model and queryset is not None:
assert issubclass(queryset.model, filterset_model), \
'FilterSet model %s does not match queryset model %s' % \
(filterset_model, queryset.model)
return filterset_class
if filterset_fields and queryset is not None:
MetaBase = getattr(self.filterset_base, 'Meta', object)
class AutoFilterSet(self.filterset_base):
class Meta(MetaBase):
model = queryset.model
fields = filterset_fields
return AutoFilterSet
return None
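# Illustration (added; hypothetical field names): a view declaring
# filterset_fields = ('username', 'email') receives an auto-generated class
# roughly equivalent to:
#
#     class AutoFilterSet(FilterSet):
#         class Meta:
#             model = queryset.model
#             fields = ('username', 'email')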
def get_filterset_kwargs(self, request, queryset, view):
return {
'data': request.query_params,
'queryset': queryset,
'request': request,
}
def filter_queryset(self, request, queryset, view):
filterset = self.get_filterset(request, queryset, view)
if filterset is None:
return queryset
if not filterset.is_valid() and self.raise_exception:
raise utils.translate_validation(filterset.errors)
return filterset.qs
def to_html(self, request, queryset, view):
filterset = self.get_filterset(request, queryset, view)
if filterset is None:
return None
template = loader.get_template(self.template)
context = {'filter': filterset}
return template.render(context, request)
def get_coreschema_field(self, field):
if isinstance(field, filters.NumberFilter):
field_cls = compat.coreschema.Number
else:
field_cls = compat.coreschema.String
return field_cls(
description=str(field.extra.get('help_text', ''))
)
def get_schema_fields(self, view):
# This is not compatible with widgets where the query param differs from the
# filter's attribute name. Notably, this includes `MultiWidget`, where query
# params will be of the format `<name>_0`, `<name>_1`, etc...
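# Illustration (added; hypothetical filter name): a filter `price` backed by a
# MultiWidget is queried as ?price_0=10&price_1=100, while the schema built
# below would advertise a single `price` parameter.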
assert compat.coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
assert compat.coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
try:
queryset = view.get_queryset()
except Exception:
queryset = None
warnings.warn(
"{} is not compatible with schema generation".format(view.__class__)
)
filterset_class = self.get_filterset_class(view, queryset)
return [] if not filterset_class else [
compat.coreapi.Field(
name=field_name,
required=field.extra['required'],
location='query',
schema=self.get_coreschema_field(field)
) for field_name, field in filterset_class.base_filters.items()
]
def get_schema_operation_parameters(self, view):
try:
queryset = view.get_queryset()
except Exception:
queryset = None
warnings.warn(
"{} is not compatible with schema generation".format(view.__class__)
)
filterset_class = self.get_filterset_class(view, queryset)
if not filterset_class:
return []
parameters = []
for field_name, field in filterset_class.base_filters.items():
parameter = {
'name': field_name,
'required': field.extra['required'],
'in': 'query',
'description': field.label if field.label is not None else field_name,
'schema': {
'type': 'boolean' if isinstance(field.field_class, forms.NullBooleanField) else 'string',
},
}
if issubclass(field.field_class, (fields.MultipleChoiceField, fields.ModelMultipleChoiceField)):
parameter['schema'] = {
'type': 'array',
'items': {
'type': 'string'
}
}
parameter['explode'] = True
if field.extra and 'choices' in field.extra:
if parameter['schema']['type'] == 'array':
parameter['schema']['items']['enum'] = [c[0] for c in field.extra['choices']]
else:
parameter['schema']['enum'] = [c[0] for c in field.extra['choices']]
parameters.append(parameter)
return parameters
| 36.832432
| 109
| 0.603904
|
8f874771e1b4e8325d357a731bddb06bcde77d1f
| 16,951
|
py
|
Python
|
numba/tests/test_array_reductions.py
|
ehsantn/numba
|
4749ef7ccc630b7f649ec972497bc5b7fca79303
|
[
"BSD-2-Clause",
"MIT"
] | 1
|
2019-02-03T21:16:20.000Z
|
2019-02-03T21:16:20.000Z
|
numba/tests/test_array_reductions.py
|
ehsantn/numba
|
4749ef7ccc630b7f649ec972497bc5b7fca79303
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
numba/tests/test_array_reductions.py
|
ehsantn/numba
|
4749ef7ccc630b7f649ec972497bc5b7fca79303
|
[
"BSD-2-Clause",
"MIT"
] | 1
|
2019-12-31T22:54:30.000Z
|
2019-12-31T22:54:30.000Z
|
from __future__ import division
from itertools import product
import numpy as np
from numba import unittest_support as unittest
from numba import jit, typeof
from numba.compiler import compile_isolated
from numba.numpy_support import version as np_version
from .support import TestCase, MemoryLeakMixin, tag
def array_all(arr):
return arr.all()
def array_all_global(arr):
return np.all(arr)
def array_any(arr):
return arr.any()
def array_any_global(arr):
return np.any(arr)
def array_cumprod(arr):
return arr.cumprod()
def array_cumprod_global(arr):
return np.cumprod(arr)
def array_cumsum(arr):
return arr.cumsum()
def array_cumsum_global(arr):
return np.cumsum(arr)
def array_sum(arr):
return arr.sum()
def array_sum_global(arr):
return np.sum(arr)
def array_prod(arr):
return arr.prod()
def array_prod_global(arr):
return np.prod(arr)
def array_mean(arr):
return arr.mean()
def array_mean_global(arr):
return np.mean(arr)
def array_var(arr):
return arr.var()
def array_var_global(arr):
return np.var(arr)
def array_std(arr):
return arr.std()
def array_std_global(arr):
return np.std(arr)
def array_min(arr):
return arr.min()
def array_min_global(arr):
return np.min(arr)
def array_max(arr):
return arr.max()
def array_max_global(arr):
return np.max(arr)
def array_argmin(arr):
return arr.argmin()
def array_argmin_global(arr):
return np.argmin(arr)
def array_argmax(arr):
return arr.argmax()
def array_argmax_global(arr):
return np.argmax(arr)
def array_median_global(arr):
return np.median(arr)
def array_nanmin(arr):
return np.nanmin(arr)
def array_nanmax(arr):
return np.nanmax(arr)
def array_nanmean(arr):
return np.nanmean(arr)
def array_nansum(arr):
return np.nansum(arr)
def array_nanprod(arr):
return np.nanprod(arr)
def array_nanstd(arr):
return np.nanstd(arr)
def array_nanvar(arr):
return np.nanvar(arr)
def array_nanmedian_global(arr):
return np.nanmedian(arr)
def base_test_arrays(dtype):
if dtype == np.bool_:
def factory(n):
assert n % 2 == 0
return np.bool_([0, 1] * (n // 2))
else:
def factory(n):
return np.arange(n, dtype=dtype) + 1
a1 = factory(10)
a2 = factory(10).reshape(2, 5)
# The prod() of this array fits in a 32-bit int
a3 = (factory(12))[::-1].reshape((2, 3, 2), order='A')
assert not (a3.flags.c_contiguous or a3.flags.f_contiguous)
return [a1, a2, a3]
def full_test_arrays(dtype):
array_list = base_test_arrays(dtype)
# Add floats with some mantissa
if dtype == np.float32:
array_list += [a / 10 for a in array_list]
for a in array_list:
assert a.dtype == np.dtype(dtype)
return array_list
def run_comparative(compare_func, test_array):
arrty = typeof(test_array)
cres = compile_isolated(compare_func, [arrty])
numpy_result = compare_func(test_array)
numba_result = cres.entry_point(test_array)
return numpy_result, numba_result
class TestArrayReductions(MemoryLeakMixin, TestCase):
"""
Test array reduction methods and functions such as .sum(), .max(), etc.
"""
def setUp(self):
super(TestArrayReductions, self).setUp()
np.random.seed(42)
def check_reduction_basic(self, pyfunc, all_nans=True, **kwargs):
# Basic reduction checks on 1-d float64 arrays
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr), **kwargs)
arr = np.float64([1.0, 2.0, 0.0, -0.0, 1.0, -1.5])
check(arr)
arr = np.float64([-0.0, -1.5])
check(arr)
arr = np.float64([-1.5, 2.5, 'inf'])
check(arr)
arr = np.float64([-1.5, 2.5, '-inf'])
check(arr)
arr = np.float64([-1.5, 2.5, 'inf', '-inf'])
check(arr)
arr = np.float64(['nan', -1.5, 2.5, 'nan', 3.0])
check(arr)
arr = np.float64(['nan', -1.5, 2.5, 'nan', 'inf', '-inf', 3.0])
check(arr)
if all_nans:
# Only NaNs
arr = np.float64(['nan', 'nan'])
check(arr)
@tag('important')
def test_all_basic(self, pyfunc=array_all):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
arr = np.float64([1.0, 0.0, float('inf'), float('nan')])
check(arr)
arr[1] = -0.0
check(arr)
arr[1] = 1.5
check(arr)
arr = arr.reshape((2, 2))
check(arr)
check(arr[::-1])
@tag('important')
def test_any_basic(self, pyfunc=array_any):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
arr = np.float64([0.0, -0.0, 0.0, 0.0])
check(arr)
arr[2] = float('nan')
check(arr)
arr[2] = float('inf')
check(arr)
arr[2] = 1.5
check(arr)
arr = arr.reshape((2, 2))
check(arr)
check(arr[::-1])
@tag('important')
def test_sum_basic(self):
self.check_reduction_basic(array_sum)
@tag('important')
def test_mean_basic(self):
self.check_reduction_basic(array_mean)
@tag('important')
def test_var_basic(self):
self.check_reduction_basic(array_var, prec='double')
@tag('important')
def test_std_basic(self):
self.check_reduction_basic(array_std)
@tag('important')
def test_min_basic(self):
self.check_reduction_basic(array_min)
@tag('important')
def test_max_basic(self):
self.check_reduction_basic(array_max)
@tag('important')
def test_argmin_basic(self):
self.check_reduction_basic(array_argmin)
@tag('important')
def test_argmax_basic(self):
self.check_reduction_basic(array_argmax)
@tag('important')
def test_nanmin_basic(self):
self.check_reduction_basic(array_nanmin)
@tag('important')
def test_nanmax_basic(self):
self.check_reduction_basic(array_nanmax)
@tag('important')
@unittest.skipUnless(np_version >= (1, 8), "nanmean needs Numpy 1.8+")
def test_nanmean_basic(self):
self.check_reduction_basic(array_nanmean)
@tag('important')
def test_nansum_basic(self):
# Note Numpy < 1.9 has different behaviour for all NaNs:
# it returns Nan while later Numpy returns 0.
self.check_reduction_basic(array_nansum,
all_nans=np_version >= (1, 9))
@tag('important')
@unittest.skipUnless(np_version >= (1, 10), "nanprod needs Numpy 1.10+")
def test_nanprod_basic(self):
self.check_reduction_basic(array_nanprod)
@tag('important')
@unittest.skipUnless(np_version >= (1, 8), "nanstd needs Numpy 1.8+")
def test_nanstd_basic(self):
self.check_reduction_basic(array_nanstd)
@tag('important')
@unittest.skipUnless(np_version >= (1, 8), "nanvar needs Numpy 1.8+")
def test_nanvar_basic(self):
self.check_reduction_basic(array_nanvar, prec='double')
def check_median_basic(self, pyfunc, array_variations):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
expected = pyfunc(arr)
got = cfunc(arr)
self.assertPreciseEqual(got, expected)
# Odd sizes
def check_odd(a):
check(a)
a = a.reshape((9, 7))
check(a)
check(a.T)
for a in array_variations(np.arange(63) + 10.5):
check_odd(a)
# Even sizes
def check_even(a):
check(a)
a = a.reshape((4, 16))
check(a)
check(a.T)
for a in array_variations(np.arange(64) + 10.5):
check_even(a)
@tag('important')
def test_median_basic(self):
pyfunc = array_median_global
def variations(a):
# Sorted, reversed, random, many duplicates
yield a
a = a[::-1].copy()
yield a
np.random.shuffle(a)
yield a
a[a % 4 >= 1] = 3.5
yield a
self.check_median_basic(pyfunc, variations)
@unittest.skipUnless(np_version >= (1, 9), "nanmedian needs Numpy 1.9+")
def test_nanmedian_basic(self):
pyfunc = array_nanmedian_global
def variations(a):
# Sorted, reversed, random, many duplicates, many NaNs
yield a
a = a[::-1].copy()
yield a
np.random.shuffle(a)
yield a
a[a % 4 <= 1] = 3.5
yield a
a[a % 4 >= 2] = float('nan')
yield a
a[:] = float('nan')
yield a
self.check_median_basic(pyfunc, variations)
def test_array_sum_global(self):
arr = np.arange(10, dtype=np.int32)
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_sum_global, [arrty])
cfunc = cres.entry_point
self.assertEqual(np.sum(arr), cfunc(arr))
def test_array_prod_int_1d(self):
arr = np.arange(10, dtype=np.int32) + 1
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_prod, [arrty])
cfunc = cres.entry_point
self.assertEqual(arr.prod(), cfunc(arr))
def test_array_prod_float_1d(self):
arr = np.arange(10, dtype=np.float32) + 1 / 10
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_prod, [arrty])
cfunc = cres.entry_point
np.testing.assert_allclose(arr.prod(), cfunc(arr))
def test_array_prod_global(self):
arr = np.arange(10, dtype=np.int32)
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_prod_global, [arrty])
cfunc = cres.entry_point
np.testing.assert_allclose(np.prod(arr), cfunc(arr))
def check_cumulative(self, pyfunc):
arr = np.arange(2, 10, dtype=np.int16)
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
arr = np.linspace(2, 8, 6)
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
arr = arr.reshape((3, 2))
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
@tag('important')
def test_array_cumsum(self):
self.check_cumulative(array_cumsum)
def test_array_cumsum_global(self):
self.check_cumulative(array_cumsum_global)
@tag('important')
def test_array_cumprod(self):
self.check_cumulative(array_cumprod)
def test_array_cumprod_global(self):
self.check_cumulative(array_cumprod_global)
def check_aggregation_magnitude(self, pyfunc, is_prod=False):
"""
Check that integer overflows are avoided (issue #931).
"""
# Overflows are avoided here (ints are cast either to intp
# or float64).
n_items = 2 if is_prod else 10 # avoid overflow on prod()
arr = (np.arange(n_items) + 40000).astype('int16')
npr, nbr = run_comparative(pyfunc, arr)
self.assertPreciseEqual(npr, nbr)
# Overflows are avoided for functions returning floats here.
# Other functions may wrap around.
arr = (np.arange(10) + 2**60).astype('int64')
npr, nbr = run_comparative(pyfunc, arr)
self.assertPreciseEqual(npr, nbr)
arr = arr.astype('uint64')
npr, nbr = run_comparative(pyfunc, arr)
self.assertPreciseEqual(npr, nbr)
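# Added illustration: NumPy reduces small integer dtypes in a wider
# accumulator, e.g. np.arange(10, dtype='int16').sum() returns a platform
# int (intp) rather than an int16, so Numba must apply the same upcast for
# the comparisons above to hold.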
def test_sum_magnitude(self):
self.check_aggregation_magnitude(array_sum)
self.check_aggregation_magnitude(array_sum_global)
def test_cumsum_magnitude(self):
self.check_aggregation_magnitude(array_cumsum)
self.check_aggregation_magnitude(array_cumsum_global)
def test_prod_magnitude(self):
self.check_aggregation_magnitude(array_prod, is_prod=True)
self.check_aggregation_magnitude(array_prod_global, is_prod=True)
def test_cumprod_magnitude(self):
self.check_aggregation_magnitude(array_cumprod, is_prod=True)
self.check_aggregation_magnitude(array_cumprod_global, is_prod=True)
def test_mean_magnitude(self):
self.check_aggregation_magnitude(array_mean)
self.check_aggregation_magnitude(array_mean_global)
def test_var_magnitude(self):
self.check_aggregation_magnitude(array_var)
self.check_aggregation_magnitude(array_var_global)
def test_std_magnitude(self):
self.check_aggregation_magnitude(array_std)
self.check_aggregation_magnitude(array_std_global)
def _do_check_nptimedelta(self, pyfunc, arr):
arrty = typeof(arr)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
# Even vs. odd size, for np.median
self.assertPreciseEqual(cfunc(arr[:-1]), pyfunc(arr[:-1]))
# Test with different orders, for np.median
arr = arr[::-1].copy() # Keep 'C' layout
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
np.random.shuffle(arr)
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
# Test with a NaT
arr[arr.size // 2] = 'NaT'
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
# Test with all NaTs
arr.fill(arrty.dtype('NaT'))
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
def check_npdatetime(self, pyfunc):
arr = np.arange(10).astype(dtype='M8[Y]')
self._do_check_nptimedelta(pyfunc, arr)
def check_nptimedelta(self, pyfunc):
arr = np.arange(10).astype(dtype='m8[s]')
self._do_check_nptimedelta(pyfunc, arr)
def test_min_npdatetime(self):
self.check_npdatetime(array_min)
self.check_nptimedelta(array_min)
def test_max_npdatetime(self):
self.check_npdatetime(array_max)
self.check_nptimedelta(array_max)
def test_argmin_npdatetime(self):
self.check_npdatetime(array_argmin)
self.check_nptimedelta(array_argmin)
def test_argmax_npdatetime(self):
self.check_npdatetime(array_argmax)
self.check_nptimedelta(array_argmax)
def test_median_npdatetime(self):
self.check_nptimedelta(array_median_global)
def test_sum_npdatetime(self):
self.check_nptimedelta(array_sum)
def test_cumsum_npdatetime(self):
self.check_nptimedelta(array_cumsum)
def test_mean_npdatetime(self):
self.check_nptimedelta(array_mean)
@classmethod
def install_generated_tests(cls):
# These form a testing product where each of the combinations are tested
reduction_funcs = [array_sum, array_sum_global,
array_prod, array_prod_global,
array_mean, array_mean_global,
array_var, array_var_global,
array_std, array_std_global,
array_min, array_min_global,
array_max, array_max_global,
array_argmin, array_argmin_global,
array_argmax, array_argmax_global,
array_all, array_all_global,
array_any, array_any_global,
array_nanmax,
array_nanmin,
array_nansum,
]
if np_version >= (1, 8):
reduction_funcs += [array_nanmean, array_nanstd, array_nanvar]
if np_version >= (1, 10):
reduction_funcs += [array_nanprod]
dtypes_to_test = [np.int32, np.float32, np.bool_]
# Install tests on class
for dt in dtypes_to_test:
test_arrays = full_test_arrays(dt)
for red_func, test_array in product(reduction_funcs, test_arrays):
# Create the name for the test function
test_name = "test_{0}_{1}_{2}d".format(red_func.__name__, test_array.dtype.name, test_array.ndim)
def new_test_function(self, redFunc=red_func, testArray=test_array, testName=test_name):
npr, nbr = run_comparative(redFunc, testArray)
self.assertPreciseEqual(npr, nbr, msg=testName, prec="single")
# Install it into the class
setattr(cls, test_name, new_test_function)
TestArrayReductions.install_generated_tests()
if __name__ == '__main__':
unittest.main()
| 29.790861
| 113
| 0.622559
|
876719b1e1e03a27cb41e5b36ed7a121bb179888
| 690
|
py
|
Python
|
var/spack/repos/builtin/packages/py-sniffio/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-sniffio/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-sniffio/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySniffio(PythonPackage):
"""This is a tiny package whose only purpose is to let you detect which
async library your code is running under."""
homepage = "https://github.com/python-trio/sniffio"
pypi = "sniffio/sniffio-1.1.0.tar.gz"
version('1.1.0', sha256='8e3810100f69fe0edd463d02ad407112542a11ffdc29f67db2bf3771afb87a21')
depends_on('py-setuptools', type='build')
depends_on('py-wheel', type='build')
depends_on('py-certifi')
| 32.857143
| 95
| 0.733333
|
bc4c0ad46d84ebd74a185c3aea717babbd4e300f
| 33,974
|
py
|
Python
|
www/src/Lib/test/test_sys.py
|
stefanhoelzl/brython
|
433d272e7bb0e3c0994392f8f265bc39e87854f7
|
[
"BSD-3-Clause"
] | 4
|
2018-03-19T12:07:18.000Z
|
2019-09-20T08:53:31.000Z
|
www/src/Lib/test/test_sys.py
|
SungBeom/BBAM_Brython
|
107036ad20a94af1d43e5ce5bd7c73e6a470d687
|
[
"BSD-3-Clause"
] | 2
|
2017-04-14T03:52:41.000Z
|
2017-04-14T04:02:06.000Z
|
www/src/Lib/test/test_sys.py
|
SungBeom/BBAM_Brython
|
107036ad20a94af1d43e5ce5bd7c73e6a470d687
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
# test that the stderr buffer is flushed before the exit message is
# written into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.suppress_crash_popup():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 5)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'os2', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('3P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('inP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i9Pi3P'))
check(get_cell.__code__, size('5i9Pi3P'))
def get_cell2(x):
def inner():
return x
return inner
check(get_cell2.__code__, size('5i9Pi3P') + 1)
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('n2P' + '2nPn' + 8*'n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('n2P' + '2nPn') + 16*struct.calcsize('n2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('Pnin 2P2n2i5P 3cPn'))
# module
check(unittest, size('PnP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3n2P' + PySet_MINSIZE*'nP' + 'nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('nP'))
check(frozenset(sample), s + newsize*struct.calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
s = vsize('P2n15Pl4Pn9Pn11PI')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = vsize('P2n15Pl4Pn9Pn11PI') + struct.calcsize('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('n2P' + '2nPn'))
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
| 37.293085
| 100
| 0.590658
|
b8f841b15b2805897958b0d459170cdbaafccb8b
| 4,786
|
wsgi
|
Python
|
backend/api/snapshot/report.wsgi
|
pstrinkle/popflip-image-stream
|
6cf1fe649ac53a91d913d1cf47cd9ec2f22765db
|
[
"Apache-2.0"
] | null | null | null |
backend/api/snapshot/report.wsgi
|
pstrinkle/popflip-image-stream
|
6cf1fe649ac53a91d913d1cf47cd9ec2f22765db
|
[
"Apache-2.0"
] | null | null | null |
backend/api/snapshot/report.wsgi
|
pstrinkle/popflip-image-stream
|
6cf1fe649ac53a91d913d1cf47cd9ec2f22765db
|
[
"Apache-2.0"
] | null | null | null |
"""Report API Call handler for snapshots."""
from pymongo import Connection
from pymongo.errors import InvalidId
from bson.objectid import ObjectId
from cgi import escape
from urlparse import parse_qs
from json import dumps
from urllib import unquote
# author will likely come from the auth session.
# missing "data" in this list, mind you it's manually checked.
POST_REQUIRED_PARAMS = ("user", "post",)
# XXX: Move into neato library.
def string_from_interwebs(input_value):
"""Given a string from the query dictionary string thing; make it clean."""
return escape(unquote(input_value))
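# For example (hypothetical input): string_from_interwebs('%3Cb%3Ehi%3C%2Fb%3E')
# first unquotes to '<b>hi</b>' and then escapes it to '&lt;b&gt;hi&lt;/b&gt;'.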
def update_post(post_id, connection):
"""Given a post id, update it's flagged."""
database = connection['test']
collection = database['posts']
collection.update({"_id" : ObjectId(post_id)},
{"$inc" : {"flagged" : 1}})
def check_report(author_id, post_id, connection):
"""Given an author and a post, check to see if they already marked it as
enjoyed. At first, this check seemed like a waste, but why not."""
database = connection['test']
collection = database['reports']
# ObjectId() raises InvalidId on malformed ids, hence the try/except below.
try:
post = collection.find_one({"$and" : [{"user" : ObjectId(author_id)},
{"post" : ObjectId(post_id)}]})
except InvalidId:
post = None
return post
def verify_post(post_id, connection):
"""Given a post id, check it."""
database = connection['test']
collection = database['posts']
try:
post = collection.find_one({"_id" : ObjectId(post_id)})
except InvalidId:
post = None
if post is None:
return False
return True
def verify_author(author, connection):
"""Given an author id, check it."""
database = connection['test']
collection = database['users']
try:
post = collection.find_one({"_id" : ObjectId(author)})
except InvalidId:
post = None
if post is None:
return False
return True
def insert_post_into_db(post):
"""Given a post dictionary, insert it into database collection for posts."""
if post is not None:
connection = Connection('localhost', 27017)
database = connection['test']
collection = database['reports']
# need to wrap with try, except
entry = collection.insert(post)
update_post(str(post["post"]), connection)
connection.close()
return {"id" : str(entry)}
return None
def handle_new_post(query_dict):
"""Does not handle multi-part data properly.
Also, posts don't quite exist as they should."""
for required in POST_REQUIRED_PARAMS:
if required not in query_dict:
return None
# not yet safe to use.
post_id = str(string_from_interwebs(query_dict["post"][0])).strip()
author_id = str(string_from_interwebs(query_dict["user"][0])).strip()
with Connection('localhost', 27017) as connection:
if not verify_author(author_id, connection):
return None
if not verify_post(post_id, connection):
return None
if check_report(author_id, post_id, connection) is not None:
return None
post = {"user" : ObjectId(author_id), "post" : ObjectId(post_id)}
return post
def bad_request(start_response):
"""Just does the same thing, over and over -- returns bad results.."""
output = []
output_len = sum(len(line) for line in output)
start_response('400 Bad Request',
[('Content-type', 'application/json'),
('Content-Length', str(output_len))])
return output
def application(environ, start_response):
"""Entry point for all wsgi applications."""
output = []
if environ['REQUEST_METHOD'] == 'GET':
return bad_request(start_response)
##### parameters are never safe
try:
content_length = int(environ['CONTENT_LENGTH'])
except ValueError:
content_length = 0
post_data = environ['wsgi.input'].read(content_length)
# likely throws an exception on parse error.
query_dict = parse_qs(post_data, keep_blank_values=True)
processed_post = handle_new_post(query_dict)
if processed_post is None:
return bad_request(start_response)
entry = insert_post_into_db(processed_post)
if entry is None:
return bad_request(start_response)
output.append(dumps(entry, indent=4))
# send results
output_len = sum(len(line) for line in output)
start_response('200 OK',
[('Content-type', 'application/json'),
('Content-Length', str(output_len))])
return output
| 27.988304
| 80
| 0.63435
|
c07f8725bb99eb6d3a940190dd110eebc42b2834
| 8,067
|
py
|
Python
|
testscripts/RDKB/component/PAM/TS_PAM_DeviceUsers_EnableMSOUser.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/PAM/TS_PAM_DeviceUsers_EnableMSOUser.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/PAM/TS_PAM_DeviceUsers_EnableMSOUser.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_PAM_DeviceUsers_EnableMSOUser</name>
<primitive_test_id/>
<primitive_test_name>pam_SetParameterValues</primitive_test_name>
<primitive_test_version>2</primitive_test_version>
<status>FREE</status>
<synopsis>This test case will enable the MSO user login</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>RPI</box_type>
<box_type>Emulator</box_type>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_PAM_66</test_case_id>
<test_objective>To enable the MSO user login</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,XB3</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes the component under test (PAM)
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
pam_SetParameterValues
Input:
ParamName - Device.Users.User.1.Enable</input_parameters>
<automation_approch>1.Function which needs to be tested will be configured in Test Manager GUI.
2.Python Script will be generated by Test Manager with provided arguments in configure page.
3.TM will load the PAM library via Test agent
4.From python script, invoke pam_SetParameterValues() stub function to set the MSO user login enable as true.
5.pam stub function will call the ssp_setParameterValue() function of tdk component.
6.Responses from the pam stub function will be logged in Agent Console log.
7.pam stub will validate the actual result with the expected result and send the result status to Test Manager.
8.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from pam stub.</automation_approch>
<except_output>CheckPoint 1:
MSO user login enabling log should be logged in the Agent console/Component log
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_PAM_DeviceUsers_EnableMSOUser</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
</xml>
'''
#import statement
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("pam","RDKB");
#IP and Port of box, no need to change;
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_PAM_DeviceUsers_EnableMSOUser');
#Get the result of connection with test component and STB
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.Users.User.1.Enable");
expectedresult="SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
#Get the result of execution
print "STEP 1: Get the boolean value whether MSO user login is enabled or disabled";
print "EXPECTED RESULT : Should get the boolean value whether MSO user login is enabled or disabled";
print "ACTUAL RESULT: Successfully get the boolean value, %s" %details;
print "[TEST EXECUTION RESULT] :%s" %actualresult;
tdkTestObj = obj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.Users.User.1.Enable");
tdkTestObj.addParameter("Type","boolean");
if "true" in details:
tdkTestObj.addParameter("ParamValue","false");
org_value = "true"
else:
tdkTestObj.addParameter("ParamValue","true");
org_value = "false"
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "STEP 2: Set the boolean value to true if MSO user login is disabled or viceversa";
print "EXPECTED RESLUT: Should set the boolean value to true if MSO user login is disabled or viceversa";
print "ACTUAL RESULT: Successfully set the boolean value, %s" %details;
print "[TEST EXECUTION RESULT] :%s" %actualresult;
tdkTestObj = obj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.Users.User.1.Enable");
tdkTestObj.addParameter("Type","boolean");
tdkTestObj.addParameter("ParamValue",org_value);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "STEP 3: Set the boolean value to default value";
print "EXPECTED RESLUT: Should set the boolean value to default value";
print "ACTUAL RESULT:Successfully set the boolean value to default value, %s" %details;
print "[TEST EXECUTION RESULT] :%s" %actualresult;
else:
tdkTestObj.setResultStatus("FAILURE");
print "STEP 3: Set the boolean value to default value";
print "EXPECTED RESLUT: Should set the boolean value to default value";
print "ACTUAL RESULT:Failed to set the boolean value to default value, %s" %details;
print "[TEST EXECUTION RESULT] :%s" %actualresult;
else:
tdkTestObj.setResultStatus("FAILURE");
print "STEP 2: Set the boolean value to true if MSO user login is disabled or viceversa";
print "EXPECTED RESLUT: Should set the boolean value to true if MSO user login is disabled or viceversa";
print "ACTUAL RESULT: Failed to set the boolean value, %s" %details;
print "[TEST EXECUTION RESULT] :%s" %actualresult;
else:
tdkTestObj.setResultStatus("FAILURE");
print "STEP 1: Get the boolean value whether MSO user login is enabled or disabled";
print "EXPECTED RESULT : Should get the boolean value whether MSO user login is enabled or disabled";
print "ACTUAL RESULT: Failed to get the boolean value, %s" %details;
print "[TEST EXECUTION RESULT] :%s" %actualresult;
obj.unloadModule("pam");
else:
print "Failed to load pam module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 45.320225
| 119
| 0.710425
|
5656d05e16761c60509708b3e6de8cdf4e5fd944
| 2,005
|
py
|
Python
|
app/views/auth.py
|
liam-carswell/flask-dance-multi-provider
|
86e66154ea83963d6b729c0cc5b8a45ba2555ee8
|
[
"MIT"
] | null | null | null |
app/views/auth.py
|
liam-carswell/flask-dance-multi-provider
|
86e66154ea83963d6b729c0cc5b8a45ba2555ee8
|
[
"MIT"
] | null | null | null |
app/views/auth.py
|
liam-carswell/flask-dance-multi-provider
|
86e66154ea83963d6b729c0cc5b8a45ba2555ee8
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request, redirect, url_for, flash, render_template
from flask_login import login_required, login_user, logout_user, current_user
from app.models import db, User, OAuth
from app.forms import RegisterForm, LoginForm, SetPasswordForm, SetEmailForm
blueprint = Blueprint("auth", __name__)
@blueprint.route("/register", methods=("GET", "POST"))
def register():
form = RegisterForm()
if form.validate_on_submit():
user = form.create_user()
if user:
login_user(user)
flash("Account created")
return redirect(url_for("main.index"))
return render_template("register.j2", form=form)
@blueprint.route("/login", methods=("GET", "POST"))
def login():
form = LoginForm()
if form.validate_on_submit():
user = form.get_user()
if user:
login_user(user)
flash("You have logged in")
return redirect(url_for("main.index"))
return render_template("login.j2", form=form)
@blueprint.route("/logout")
@login_required
def logout():
logout_user()
flash("You have logged out")
return redirect(url_for("main.index"))
@blueprint.route("/set_password", methods=("GET", "POST"))
@login_required
def set_password():
form = SetPasswordForm()
if form.validate_on_submit():
current_user.password = form.password.data
db.session.add(current_user)
db.session.commit()
flash("Password set successfully")
return redirect(url_for("main.index"))
return render_template("set_password.j2", form=form)
@blueprint.route("/set_email", methods=("GET", "POST"))
@login_required
def set_email():
form = SetEmailForm()
if form.validate_on_submit():
if not form.email_taken():
current_user.email = form.email.data
db.session.commit()
flash("Email set successfully")
return redirect(url_for("main.index"))
return render_template("set_email.j2", form=form)
| 30.378788
| 79
| 0.663342
|
8c51654b911a03f875af93a4c5d3b74e0ac8e047
| 1,408
|
py
|
Python
|
setup.py
|
jose-moran/GOSTnets
|
7dfbb1f1ebb4970ee49283b0d1d92af41a48624c
|
[
"MIT"
] | 11
|
2019-12-09T19:18:46.000Z
|
2021-11-05T11:26:18.000Z
|
setup.py
|
jose-moran/GOSTnets
|
7dfbb1f1ebb4970ee49283b0d1d92af41a48624c
|
[
"MIT"
] | 25
|
2019-06-19T11:30:26.000Z
|
2022-03-01T20:32:48.000Z
|
setup.py
|
jose-moran/GOSTnets
|
7dfbb1f1ebb4970ee49283b0d1d92af41a48624c
|
[
"MIT"
] | 9
|
2019-06-26T17:27:43.000Z
|
2022-03-25T10:27:10.000Z
|
from setuptools import setup, find_packages
setup(
name="GOSTnets",
version="1.0.1",
packages=find_packages(),
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=[#'docutils>=0.3',
'geopandas>=0.4.0',
'networkx',
'numpy',
'osmnx>=1.0.1',
'pandas>=0.23.5',
'pyproj>=2.4.2',
'scipy',
'shapely'
],
package_data={
# If any package contains *.txt or *.rst files, include them:
# And include any *.msg files found in the 'hello' package, too:
},
# metadata to display on PyPI
author="Benjamin P. Stewart",
author_email="ben.gis.stewart@gmail.com",
description="Networkx wrapper to simplify network analysis using geospatial data",
license="PSF",
keywords="networkx networks OSM",
url="https://github.com/worldbank/GOSTnets", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/worldbank/GOSTnets/issues",
"Documentation": "https://github.com/worldbank/GOSTnets",
"Source Code": "https://github.com/worldbank/GOSTnets",
}
# could also include long_description, download_url, classifiers, etc.
)
| 34.341463
| 86
| 0.574574
|
0488244fbbcb0ac2234aa1072ba921e840989c93
| 126
|
py
|
Python
|
codewof/programming/content/en/teenagers/solution.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 3
|
2019-08-29T04:11:22.000Z
|
2021-06-22T16:05:51.000Z
|
codewof/programming/content/en/teenagers/solution.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 265
|
2019-05-30T03:51:46.000Z
|
2022-03-31T01:05:12.000Z
|
codewof/programming/content/en/teenagers/solution.py
|
samuelsandri/codewof
|
c9b8b378c06b15a0c42ae863b8f46581de04fdfc
|
[
"MIT"
] | 7
|
2019-06-29T12:13:37.000Z
|
2021-09-06T06:49:14.000Z
|
age = int(input("How old are you? "))
if 13 <= age < 20:
print("You are a teen")
else:
print("You aren't a teenager")
| 21
| 37
| 0.587302
|
87eed92ede2aa4a7270ff32d4e8efaa11b866408
| 2,306
|
py
|
Python
|
theories/parsing/slr1.py
|
xmnlab/poc-compilers
|
cd1008f7269f6dfee5afef1badc002789c865b51
|
[
"BSD-3-Clause"
] | 3
|
2021-06-21T23:51:10.000Z
|
2021-08-24T21:20:08.000Z
|
theories/parsing/slr1.py
|
xmnlab/poc-flex-bison
|
cd1008f7269f6dfee5afef1badc002789c865b51
|
[
"BSD-3-Clause"
] | 2
|
2021-06-24T15:19:52.000Z
|
2021-06-28T21:31:03.000Z
|
theories/parsing/slr1.py
|
xmnlab/poc-flex-bison
|
cd1008f7269f6dfee5afef1badc002789c865b51
|
[
"BSD-3-Clause"
] | null | null | null |
"""
grammar 1:
S -> A ( S ) B | \\varepsilon
A -> S | S B | x | \\varepsilon
B -> S B | y
Items in the initial state of the SLR(1) parsing automaton
for the previous grammar
S: .A ( S ) B
S: .
A: .S
A: .S B
A: .x
A: .
B: .S B
B: .y
grammar 2:
S -> E
E -> E + T | T
T -> T * F | F
F -> id
grammar 3:
S -> a B S | a a | a
B -> a
Parsing:
1.
S' -> .S
S -> .a B S
S -> .a a
S -> .a
B -> .a
2. (1) --S-->
S' -> S.
3. (1) --a-->
S -> a. B S
S -> a. a
S -> a.
B -> a.
4. (3) --a-->
S -> a a.
5. (3) --B-->
S -> a B. S
6. (5) --S-->
S -> a B S.
grammar 4:
S -> (T)
T -> T + int | int
String input: (int + int + int + int + int)
Parsing
00. .(int + int + int + int + int)
01. (.int + int + int + int + int) # shift
02. (int. + int + int + int + int) # shift
03. (T. + int + int + int + int) # reduce
04. (T +. int + int + int + int) # shift
05. (T + int. + int + int + int) # shift
06. (T. + int + int + int) # reduce
07. (T +. int + int + int) # shift
08. (T + int. + int + int) # shift
09. (T. + int + int) # reduce
10. (T +. int + int) # shift
11. (T + int. + int) # shift
12. (T. + int) # reduce
13. (T +. int) # shift
14. (T + int.) # shift
15. (T.) # reduce
16. (T). # shift
17. S. # reduce
n (number of "int"s) = 5
shift = 11
reduce = 6
grammar 5:
S -> (T)
T -> int + T | int
String input: (int + int + int + int + int)
Parsing
00. .(int + int + int + int + int)
01. (.int + int + int + int + int) # shift
02. (int. + int + int + int + int) # shift
03. (int +. int + int + int + int) # shift
04. (int + int. + int + int + int) # shift
05. (int + int +. int + int + int) # shift
06. (int + int + int. + int + int) # shift
07. (int + int + int +. int + int) # shift
08. (int + int + int + int. + int) # shift
09. (int + int + int + int +. int) # shift
10. (int + int + int + int + int.) # shift
11. (int + int +. int + int + T.) # reduce
12. (int + int +. int + T.) # reduce
13. (int + int +. T.) # reduce
14. (int + T.) # reduce
15. (T.) # reduce
16. (T). # shift
17. S. # reduce
n (number of "int"s) = 5
shift = 11
reduce = 6
grammar 6:
S -> b a S a b | b a S | b
Parsing:
S' -> S
S -> b a S a b
S -> b a S
S -> b
Resources:
- https://www.javatpoint.com/slr-1-parsing
- http://jsmachines.sourceforge.net/machines/slr.html
"""
| 16.013889
| 57
| 0.480919
|
186a674e7bdeccc254f9ce5c12df581b6a65d01d
| 48,473
|
py
|
Python
|
beets/library.py
|
tima/beets
|
13fede597f6c839c5c7d7a2a7fc5c8a07bb31174
|
[
"MIT"
] | null | null | null |
beets/library.py
|
tima/beets
|
13fede597f6c839c5c7d7a2a7fc5c8a07bb31174
|
[
"MIT"
] | null | null | null |
beets/library.py
|
tima/beets
|
13fede597f6c839c5c7d7a2a7fc5c8a07bb31174
|
[
"MIT"
] | null | null | null |
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The core data store and collection logic for beets.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import sys
import shlex
import unicodedata
import time
import re
from unidecode import unidecode
from beets import logging
from beets.mediafile import MediaFile, MutagenError, UnreadableFileError
from beets import plugins
from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile
from beets.util.functemplate import Template
from beets import dbcore
from beets.dbcore import types
import beets
log = logging.getLogger('beets')
# Library-specific query types.
class PathQuery(dbcore.FieldQuery):
"""A query that matches all items under a given path.
Matching can either be case-insensitive or case-sensitive. By
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
escape_re = re.compile(r'[\\_%]')
escape_char = b'\\'
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a
file or a directory.
`case_sensitive` can be a bool or `None`, indicating that the
behavior should depend on the filesystem.
"""
super(PathQuery, self).__init__(field, pattern, fast)
# By default, the case sensitivity depends on the filesystem
# that the query path is located on.
if case_sensitive is None:
path = util.bytestring_path(util.normpath(pattern))
case_sensitive = beets.util.case_sensitive(path)
self.case_sensitive = case_sensitive
# Use a normalized-case pattern for case-insensitive matches.
if not case_sensitive:
pattern = pattern.lower()
# Match the path as a single file.
self.file_path = util.bytestring_path(util.normpath(pattern))
# As a directory (prefix).
self.dir_path = util.bytestring_path(os.path.join(self.file_path, b''))
@classmethod
def is_path_query(cls, query_part):
"""Try to guess whether a unicode query part is a path query.
Condition: a path separator precedes the colon and the path exists.
"""
colon = query_part.find(':')
if colon != -1:
query_part = query_part[:colon]
return (os.sep in query_part
and os.path.exists(syspath(normpath(query_part))))
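# Illustrative behaviour (hypothetical POSIX paths, added for clarity):
#     is_path_query('/music/a:b')  -> True if the path '/music/a' exists
#     is_path_query('artist:foo')  -> False: 'artist' has no separator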
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self):
if self.case_sensitive:
file_blob = buffer(self.file_path)
dir_blob = buffer(self.dir_path)
return '({0} = ?) || (substr({0}, 1, ?) = ?)'.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
escape = lambda m: self.escape_char + m.group(0)
dir_pattern = self.escape_re.sub(escape, self.dir_path)
dir_blob = buffer(dir_pattern + b'%')
file_pattern = self.escape_re.sub(escape, self.file_path)
file_blob = buffer(file_pattern)
return '({0} LIKE ? ESCAPE ?) || ({0} LIKE ? ESCAPE ?)'.format(
self.field), (file_blob, self.escape_char, dir_blob,
self.escape_char)
# Library-specific field types.
class DateType(types.Float):
# TODO representation should be `datetime` object
# TODO distinguish between date and time types
query = dbcore.query.DateQuery
def format(self, value):
return time.strftime(beets.config['time_format'].get(unicode),
time.localtime(value or 0))
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string, beets.config['time_format'].get(unicode))
)
except ValueError:
# Fall back to a plain timestamp number.
try:
return float(string)
except ValueError:
return self.null
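# Illustrative round trip (assumes time_format = '%Y-%m-%d %H:%M:%S'):
# parse('1970-01-02 00:00:00') returns the matching local-time timestamp,
# and format() of that value prints the same string back; input that is
# neither a formatted date nor a number falls through to the null value.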
class PathType(types.Type):
sql = u'BLOB'
query = PathQuery
model_type = bytes
def format(self, value):
return util.displayable_path(value)
def parse(self, string):
return normpath(bytestring_path(string))
def normalize(self, value):
if isinstance(value, unicode):
# Paths stored internally as encoded bytes.
return bytestring_path(value)
elif isinstance(value, buffer):
# SQLite must store bytestrings as buffers to avoid decoding.
# We unwrap buffers to bytes.
return bytes(value)
else:
return value
def from_sql(self, sql_value):
return self.normalize(sql_value)
def to_sql(self, value):
if isinstance(value, bytes):
value = buffer(value)
return value
class MusicalKey(types.String):
"""String representing the musical key of a song.
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
r'db': 'c#',
r'eb': 'd#',
r'gb': 'f#',
r'ab': 'g#',
r'bb': 'a#',
}
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
key = re.sub(flat, sharp, key)
key = re.sub(r'[\W\s]+minor', 'm', key)
return key.capitalize()
def normalize(self, key):
if key is None:
return None
else:
return self.parse(key)
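# Illustrative normalisation (values follow the ENHARMONIC table above):
#     MusicalKey().parse('Db minor')  ->  'C#m'
#     MusicalKey().parse('bb')        ->  'A#'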
# Library-specific sort types.
class SmartArtistSort(dbcore.query.Sort):
"""Sort by artist (either album artist or track artist),
prioritizing the sort field over the raw field.
"""
def __init__(self, model_cls, ascending=True, case_insensitive=True):
self.album = model_cls is Album
self.ascending = ascending
self.case_insensitive = case_insensitive
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
field = 'albumartist' if self.album else 'artist'
collate = 'COLLATE NOCASE' if self.case_insensitive else ''
return ('(CASE {0}_sort WHEN NULL THEN {0} '
'WHEN "" THEN {0} '
'ELSE {0}_sort END) {1} {2}').format(field, collate, order)
def sort(self, objs):
if self.album:
field = lambda a: a.albumartist_sort or a.albumartist
else:
field = lambda i: i.artist_sort or i.artist
if self.case_insensitive:
key = lambda x: field(x).lower()
else:
key = field
return sorted(objs, key=key, reverse=not self.ascending)
# Special path format key.
PF_KEY_DEFAULT = 'default'
# Exceptions.
class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
error, and an unhandled Mutagen exception.
"""
def __init__(self, path, reason):
"""Create an exception describing an operation on the file at
`path` with the underlying (chained) exception `reason`.
"""
super(FileOperationError, self).__init__(path, reason)
self.path = path
self.reason = reason
def __unicode__(self):
"""Get a string representing the error. Describes both the
underlying reason and the file path in question.
"""
return u'{0}: {1}'.format(
util.displayable_path(self.path),
unicode(self.reason)
)
def __str__(self):
return unicode(self).encode('utf8')
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`).
"""
def __unicode__(self):
return u'error reading ' + super(ReadError, self).__unicode__()
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`).
"""
def __unicode__(self):
return u'error writing ' + super(WriteError, self).__unicode__()
# Item and Album model classes.
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
_format_config_key = None
"""Config key that specifies how an instance should be formatted.
"""
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
funcs.update(plugins.template_funcs())
return funcs
def store(self):
super(LibModel, self).store()
plugins.send('database_change', lib=self._db, model=self)
def remove(self):
super(LibModel, self).remove()
plugins.send('database_change', lib=self._db, model=self)
def add(self, lib=None):
super(LibModel, self).add(lib)
plugins.send('database_change', lib=self._db, model=self)
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].get(unicode)
result = self.evaluate_template(spec)
if isinstance(spec, bytes):
# if spec is a byte string then we must return one as well
return result.encode('utf8')
else:
return result
def __str__(self):
return format(self).encode('utf8')
def __unicode__(self):
return format(self)
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
Album-level fields take precedence if `for_path` is true.
"""
def __init__(self, item, for_path=False):
super(FormattedItemMapping, self).__init__(item, for_path)
self.album = item.get_album()
self.album_keys = []
if self.album:
for key in self.album.keys(True):
if key in Album.item_keys or key not in item._fields.keys():
self.album_keys.append(key)
self.all_keys = set(self.model_keys).union(self.album_keys)
def _get(self, key):
"""Get the value for a key, either from the album or the item.
Raise a KeyError for invalid keys.
"""
if self.for_path and key in self.album_keys:
return self._get_formatted(self.album, key)
elif key in self.model_keys:
return self._get_formatted(self.model, key)
elif key in self.album_keys:
return self._get_formatted(self.album, key)
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get the value for a key. Certain unset values are remapped.
"""
value = self._get(key)
# `artist` and `albumartist` fields fall back to one another.
# This is helpful in path formats when the album artist is unset
# on as-is imports.
if key == 'artist' and not value:
return self._get('albumartist')
elif key == 'albumartist' and not value:
return self._get('artist')
else:
return value
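# Illustrative fallback (added for clarity): on an as-is import where
# 'albumartist' is empty, mapping['albumartist'] returns the track's
# 'artist' value (and vice versa), so path formats never render an
# empty artist component.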
def __iter__(self):
return iter(self.all_keys)
def __len__(self):
return len(self.all_keys)
class Item(LibModel):
_table = 'items'
_flex_table = 'item_attributes'
_fields = {
'id': types.PRIMARY_ID,
'path': PathType(),
'album_id': types.FOREIGN_ID,
'title': types.STRING,
'artist': types.STRING,
'artist_sort': types.STRING,
'artist_credit': types.STRING,
'album': types.STRING,
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'genre': types.STRING,
'composer': types.STRING,
'grouping': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'track': types.PaddedInt(2),
'tracktotal': types.PaddedInt(2),
'disc': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'lyrics': types.STRING,
'comments': types.STRING,
'bpm': types.INTEGER,
'comp': types.BOOLEAN,
'mb_trackid': types.STRING,
'mb_albumid': types.STRING,
'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
'acoustid_id': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'media': types.STRING,
'albumdisambig': types.STRING,
'disctitle': types.STRING,
'encoder': types.STRING,
'rg_track_gain': types.NULL_FLOAT,
'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
'initial_key': MusicalKey(),
'length': types.FLOAT,
'bitrate': types.ScaledInt(1000, u'kbps'),
'format': types.STRING,
'samplerate': types.ScaledInt(1000, u'kHz'),
'bitdepth': types.INTEGER,
'channels': types.INTEGER,
'mtime': DateType(),
'added': DateType(),
}
_search_fields = ('artist', 'title', 'comments',
'album', 'albumartist', 'genre')
_types = {
'data_source': types.STRING,
}
_media_fields = set(MediaFile.readable_fields()) \
.intersection(_fields.keys())
"""Set of item fields that are backed by `MediaFile` fields.
Any kind of field (fixed, flexible, and computed) may be a media
field. Only these fields are read from disk in `read` and written in
`write`.
"""
_media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys())
"""Set of item fields that are backed by *writable* `MediaFile` tag
fields.
This excludes fields that represent audio data, such as `bitrate` or
`length`.
"""
_formatter = FormattedItemMapping
_sorts = {'artist': SmartArtistSort}
_format_config_key = 'format_item'
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
getters['singleton'] = lambda i: i.album_id is None
getters['filesize'] = Item.try_filesize # In bytes.
return getters
@classmethod
def from_path(cls, path):
"""Creates a new item from the media file at the specified path.
"""
# Initiate with values that aren't read from files.
i = cls(album_id=None)
i.read(path)
i.mtime = i.current_mtime() # Initial mtime.
return i
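    # A minimal usage sketch (the path below is hypothetical, not from this
    # repository):
    #   item = Item.from_path(b'/music/Artist/Album/01 Song.mp3')
    #   lib.add(item)  # assumes `lib` is a Library instance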
def __setitem__(self, key, value):
"""Set the item's value for a standard field or a flexattr.
"""
# Encode unicode paths and read buffers.
if key == 'path':
if isinstance(value, unicode):
value = bytestring_path(value)
elif isinstance(value, buffer):
value = bytes(value)
if key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
super(Item, self).__setitem__(key, value)
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
specified, it is not reset (as it might otherwise be).
"""
super(Item, self).update(values)
if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime']
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
"""
if not self._db:
return None
return self._db.get_album(self)
# Interaction with file metadata.
def read(self, read_path=None):
"""Read the metadata from the associated file.
If `read_path` is specified, read metadata from that file
instead. Updates all the properties in `_media_fields`
from the media file.
Raises a `ReadError` if the file could not be read.
"""
if read_path is None:
read_path = self.path
else:
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except (OSError, IOError, UnreadableFileError) as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
            if isinstance(value, (int, long)):
                # SQLite stores integers as 64-bit signed values; clamp
                # anything larger to zero.
                if value.bit_length() > 63:
                    value = 0
self[key] = value
# Database's mtime should now reflect the on-disk value.
if read_path == self.path:
self.mtime = self.current_mtime()
self.path = read_path
def write(self, path=None, tags=None):
"""Write the item's metadata to a media file.
All fields in `_media_fields` are written to disk according to
the values on this object.
`path` is the path of the mediafile to write the data to. It
defaults to the item's path.
        `tags` is a dictionary of additional metadata that should be
written to the file. (These tags need not be in `_media_fields`.)
Can raise either a `ReadError` or a `WriteError`.
"""
if path is None:
path = self.path
else:
path = normpath(path)
# Get the data to write to the file.
item_tags = dict(self)
item_tags = {k: v for k, v in item_tags.items()
if k in self._media_fields} # Only write media fields.
if tags is not None:
item_tags.update(tags)
plugins.send('write', item=self, path=path, tags=item_tags)
# Open the file.
try:
mediafile = MediaFile(syspath(path),
id3v23=beets.config['id3v23'].get(bool))
except (OSError, IOError, UnreadableFileError) as exc:
raise ReadError(self.path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except (OSError, IOError, MutagenError) as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
if path == self.path:
self.mtime = self.current_mtime()
plugins.send('after_write', item=self, path=path)
def try_write(self, path=None, tags=None):
"""Calls `write()` but catches and logs `FileOperationError`
exceptions.
        Returns `False` if an exception was caught and `True` otherwise.
"""
try:
self.write(path, tags)
return True
except FileOperationError as exc:
log.error("{0}", exc)
return False
def try_sync(self, write=None):
"""Synchronize the item with the database and the media file
tags, updating them with this object's current state.
By default, the current `path` for the item is used to write
tags. If `write` is `False`, no tags are written. If `write` is
a path, tags are written to that file instead.
Similar to calling :meth:`write` and :meth:`store`.
"""
if write is True:
write = None
if write is not False:
self.try_write(path=write)
self.store()
# Files themselves.
def move_file(self, dest, copy=False, link=False):
"""Moves or copies the item's file, updating the path value if
the move succeeds. If a file exists at ``dest``, then it is
slightly modified to be unique.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if copy:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif link:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
else:
plugins.send("before_item_moved", item=self, source=self.path,
destination=dest)
util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path,
destination=dest)
# Either copying or moving succeeded, so update the stored path.
self.path = dest
def current_mtime(self):
"""Returns the current mtime of the file, rounded to the nearest
integer.
"""
return int(os.path.getmtime(syspath(self.path)))
def try_filesize(self):
"""Get the size of the underlying file in bytes.
If the file is missing, return 0 (and log a warning).
"""
try:
return os.path.getsize(syspath(self.path))
except (OSError, Exception) as exc:
log.warning(u'could not get filesize: {0}', exc)
return 0
# Model methods.
def remove(self, delete=False, with_album=True):
"""Removes the item. If `delete`, then the associated file is
removed from disk. If `with_album`, then the item's album (if
        any) is removed if the item was the last one in the album.
"""
super(Item, self).remove()
# Remove the album if it is empty.
if with_album:
album = self.get_album()
if album and not album.items():
album.remove(delete, False)
        # Send an 'item_removed' signal to plugins
plugins.send('item_removed', item=self)
# Delete the associated file.
if delete:
util.remove(self.path)
util.prune_dirs(os.path.dirname(self.path), self._db.directory)
self._db._memotable = {}
def move(self, copy=False, link=False, basedir=None, with_album=True):
"""Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location.
        If `copy` is true, the file is copied rather than moved.
Similarly, `link` creates a symlink instead.
basedir overrides the library base directory for the
destination.
If the item is in an album, the album is given an opportunity to
move its art. (This can be disabled by passing
with_album=False.)
The item is stored to the database if it is in the database, so
any dirty fields prior to the move() call will be written as a
side effect. You probably want to call save() to commit the DB
transaction.
"""
self._check_db()
dest = self.destination(basedir=basedir)
# Create necessary ancestry for the move.
util.mkdirall(dest)
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, copy, link)
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(copy)
album.store()
# Prune vacated directory.
if not copy:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating.
def destination(self, fragment=False, basedir=None, platform=None,
path_formats=None):
"""Returns the path in the library directory designated for the
item (i.e., where the file ought to be). fragment makes this
method return just the path fragment underneath the root library
directory; the path is also returned as Unicode instead of
encoded as a bytestring. basedir can override the library's base
directory for the destination.
"""
self._check_db()
platform = platform or sys.platform
basedir = basedir or self._db.directory
path_formats = path_formats or self._db.path_formats
# Use a path format based on a query, falling back on the
# default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
break
else:
# No query matched; fall back to default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
break
else:
assert False, "no default path format"
if isinstance(path_format, Template):
subpath_tmpl = path_format
else:
subpath_tmpl = Template(path_format)
# Evaluate the selected template.
subpath = self.evaluate_template(subpath_tmpl, True)
# Prepare path for output: normalize Unicode characters.
if platform == 'darwin':
subpath = unicodedata.normalize('NFD', subpath)
else:
subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']:
subpath = unidecode(subpath)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
# When zero, try to determine from filesystem.
maxlen = util.max_filename_length(self._db.directory)
subpath, fellback = util.legalize_path(
subpath, self._db.replacements, maxlen,
os.path.splitext(self.path)[1], fragment
)
if fellback:
# Print an error message if legalization fell back to
# default replacements because of the maximum length.
log.warning('Fell back to default replacements when naming '
'file {}. Configure replacements to avoid lengthening '
'the filename.', subpath)
if fragment:
return subpath
else:
return normpath(os.path.join(basedir, subpath))
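    # Illustrative sketch (all values hypothetical): with the default path
    # format '$artist/$album/$track $title' and directory '~/Music',
    # destination() might return b'/home/user/Music/Artist/Album/01 Song.mp3',
    # while destination(fragment=True) returns just u'Artist/Album/01 Song'.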
class Album(LibModel):
"""Provides access to information about albums stored in a
library. Reflects the library's "albums" table, including album
art.
"""
_table = 'albums'
_flex_table = 'album_attributes'
_always_dirty = True
_fields = {
'id': types.PRIMARY_ID,
'artpath': PathType(),
'added': DateType(),
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'album': types.STRING,
'genre': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'comp': types.BOOLEAN,
'mb_albumid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'albumdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
}
_search_fields = ('album', 'albumartist', 'genre')
_types = {
'path': PathType(),
'data_source': types.STRING,
}
_sorts = {
'albumartist': SmartArtistSort,
'artist': SmartArtistSort,
}
item_keys = [
'added',
'albumartist',
'albumartist_sort',
'albumartist_credit',
'album',
'genre',
'year',
'month',
'day',
'disctotal',
'comp',
'mb_albumid',
'mb_albumartistid',
'albumtype',
'label',
'mb_releasegroupid',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'rg_album_gain',
'rg_album_peak',
'original_year',
'original_month',
'original_day',
]
"""List of keys that are set on an album's items.
"""
_format_config_key = 'format_album'
@classmethod
def _getters(cls):
# In addition to plugin-provided computed fields, also expose
# the album's directory as `path`.
getters = plugins.album_field_getters()
getters['path'] = Album.item_dir
getters['albumtotal'] = Album._albumtotal
return getters
def items(self):
"""Returns an iterable over the items associated with this
album.
"""
return self._db.items(dbcore.MatchQuery('album_id', self.id))
def remove(self, delete=False, with_items=True):
"""Removes this album and all its associated items from the
library. If delete, then the items' files are also deleted
from disk, along with any album art. The directories
containing the album are also removed (recursively) if empty.
Set with_items to False to avoid removing the album's items.
"""
super(Album, self).remove()
# Delete art file.
if delete:
artpath = self.artpath
if artpath:
util.remove(artpath)
# Remove (and possibly delete) the constituent items.
if with_items:
for item in self.items():
item.remove(delete, False)
def move_art(self, copy=False, link=False):
"""Move or copy any existing album art so that it remains in the
same directory as the items.
"""
old_art = self.artpath
if not old_art:
return
new_art = self.art_destination(old_art)
if new_art == old_art:
return
new_art = util.unique_path(new_art)
log.debug(u'moving album art {0} to {1}',
util.displayable_path(old_art),
util.displayable_path(new_art))
if copy:
util.copy(old_art, new_art)
elif link:
util.link(old_art, new_art)
else:
util.move(old_art, new_art)
self.artpath = new_art
# Prune old path when moving.
if not copy:
util.prune_dirs(os.path.dirname(old_art),
self._db.directory)
def move(self, copy=False, link=False, basedir=None):
"""Moves (or copies) all items to their destination. Any album
art moves along with them. basedir overrides the library base
directory for the destination. The album is stored to the
database, persisting any modifications to its metadata.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(copy, link, basedir=basedir, with_album=False)
# Move art.
self.move_art(copy, link)
self.store()
def item_dir(self):
"""Returns the directory containing the album's first item,
provided that such an item exists.
"""
item = self.items().get()
if not item:
raise ValueError('empty album')
return os.path.dirname(item.path)
def _albumtotal(self):
"""Return the total number of tracks on all discs on the album
"""
if self.disctotal == 1 or not beets.config['per_disc_numbering']:
return self.items()[0].tracktotal
counted = []
total = 0
for item in self.items():
if item.disc in counted:
continue
total += item.tracktotal
counted.append(item.disc)
if len(counted) == self.disctotal:
break
return total
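    # Example (illustrative): with per_disc_numbering enabled, a 2-disc album
    # whose items report tracktotal 10 on disc 1 and 8 on disc 2 yields
    # _albumtotal() == 18; each disc's tracktotal is counted exactly once.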
def art_destination(self, image, item_dir=None):
"""Returns a path to the destination for the album art image
for the album. `image` is the path of the image that will be
moved there (used for its extension).
The path construction uses the existing path of the album's
items, so the album must contain at least one item or
item_dir must be provided.
"""
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = Template(beets.config['art_filename'].get(unicode))
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']:
subpath = unidecode(subpath)
subpath = util.sanitize_path(subpath,
replacements=self._db.replacements)
subpath = bytestring_path(subpath)
_, ext = os.path.splitext(image)
dest = os.path.join(item_dir, subpath + ext)
return bytestring_path(dest)
def set_art(self, path, copy=True):
"""Sets the album's cover art to the image at the given path.
The image is copied (or moved) into place, replacing any
existing art.
Sends an 'art_set' event with `self` as the sole argument.
"""
path = bytestring_path(path)
oldart = self.artpath
artdest = self.art_destination(path)
if oldart and samefile(path, oldart):
# Art already set.
return
elif samefile(path, artdest):
# Art already in place.
self.artpath = path
return
# Normal operation.
if oldart == artdest:
util.remove(oldart)
artdest = util.unique_path(artdest)
if copy:
util.copy(path, artdest)
else:
util.move(path, artdest)
self.artpath = artdest
plugins.send('art_set', album=self)
def store(self):
"""Update the database with the album information. The album's
tracks are also updated.
"""
# Get modified track fields.
track_updates = {}
for key in self.item_keys:
if key in self._dirty:
track_updates[key] = self[key]
with self._db.transaction():
super(Album, self).store()
if track_updates:
for item in self.items():
for key, value in track_updates.items():
item[key] = value
item.store()
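    # Sketch of the propagation above (values are illustrative): setting an
    # album-level field marks it dirty, and store() pushes it to every item:
    #   album.year = 2001
    #   album.store()  # each item in the album now has year == 2001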
def try_sync(self, write=True):
"""Synchronize the album and its items with the database and
their files by updating them with this object's current state.
`write` indicates whether to write tags to the item files.
"""
self.store()
for item in self.items():
item.try_sync(bool(write))
# Query construction helpers.
def parse_query_parts(parts, model_cls):
"""Given a beets query string as a list of components, return the
`Query` and `Sort` they represent.
Like `dbcore.parse_sorted_query`, with beets query prefixes and
special path query detection.
"""
# Get query types and their prefix characters.
prefixes = {':': dbcore.query.RegexpQuery}
prefixes.update(plugins.queries())
# Special-case path-like queries, which are non-field queries
# containing path separators (/).
path_parts = []
non_path_parts = []
for s in parts:
if PathQuery.is_path_query(s):
path_parts.append(s)
else:
non_path_parts.append(s)
query, sort = dbcore.parse_sorted_query(
model_cls, non_path_parts, prefixes
)
# Add path queries to aggregate query.
# Match field / flexattr depending on whether the model has the path field
fast_path_query = 'path' in model_cls._fields
query.subqueries += [PathQuery('path', s, fast_path_query)
for s in path_parts]
return query, sort
def parse_query_string(s, model_cls):
"""Given a beets query string, return the `Query` and `Sort` they
represent.
The string is split into components using shell-like syntax.
"""
assert isinstance(s, unicode), "Query is not unicode: {0!r}".format(s)
# A bug in Python < 2.7.3 prevents correct shlex splitting of
# Unicode strings.
# http://bugs.python.org/issue6988
s = s.encode('utf8')
try:
parts = [p.decode('utf8') for p in shlex.split(s)]
except ValueError as exc:
raise dbcore.InvalidQueryError(s, exc)
return parse_query_parts(parts, model_cls)
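# A minimal sketch of the helpers above (query text is hypothetical):
#   query, sort = parse_query_string(u'artist:dylan year:1965', Item)
# Terms that look like paths (containing '/') become PathQuery subqueries.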
# The Library: interface to the database.
class Library(dbcore.Database):
"""A database of music containing songs and albums.
"""
_models = (Item, Album)
def __init__(self, path='library.blb',
directory='~/Music',
path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),),
replacements=None):
if path != ':memory:':
self.path = bytestring_path(normpath(path))
super(Library, self).__init__(path)
self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats
self.replacements = replacements
self._memotable = {} # Used for template substitution performance.
# Adding objects to the database.
def add(self, obj):
"""Add the :class:`Item` or :class:`Album` object to the library
database. Return the object's new id.
"""
obj.add(self)
self._memotable = {}
return obj.id
def add_album(self, items):
"""Create a new album consisting of a list of items.
The items are added to the database if they don't yet have an
ID. Return a new :class:`Album` object. The list items must not
be empty.
"""
if not items:
raise ValueError(u'need at least one item')
# Create the album structure using metadata from the first item.
values = dict((key, items[0][key]) for key in Album.item_keys)
album = Album(self, **values)
# Add the album structure and set the items' album_id fields.
# Store or add the items.
with self.transaction():
album.add(self)
for item in items:
item.album_id = album.id
if item.id is None:
item.add(self)
else:
item.store()
return album
# Querying.
def _fetch(self, model_cls, query, sort=None):
"""Parse a query and fetch. If a order specification is present
in the query string the `sort` argument is ignored.
"""
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, basestring):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentTypeError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
# provided sort.
if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
sort = parsed_sort
return super(Library, self)._fetch(
model_cls, query, sort
)
@staticmethod
def get_default_album_sort():
"""Get a :class:`Sort` object for albums from the config option.
"""
return dbcore.sort_from_strings(
Album, beets.config['sort_album'].as_str_seq())
@staticmethod
def get_default_item_sort():
"""Get a :class:`Sort` object for items from the config option.
"""
return dbcore.sort_from_strings(
Item, beets.config['sort_item'].as_str_seq())
def albums(self, query=None, sort=None):
"""Get :class:`Album` objects matching the query.
"""
return self._fetch(Album, query, sort or self.get_default_album_sort())
def items(self, query=None, sort=None):
"""Get :class:`Item` objects matching the query.
"""
return self._fetch(Item, query, sort or self.get_default_item_sort())
# Convenience accessors.
def get_item(self, id):
"""Fetch an :class:`Item` by its ID. Returns `None` if no match is
found.
"""
return self._get(Item, id)
def get_album(self, item_or_id):
"""Given an album ID or an item associated with an album, return
an :class:`Album` object for the album. If no such album exists,
returns `None`.
"""
if isinstance(item_or_id, int):
album_id = item_or_id
else:
album_id = item_or_id.album_id
if album_id is None:
return None
return self._get(Album, album_id)
# Default path template resources.
def _int_arg(s):
"""Convert a string argument to an integer for use in a template
function. May raise a ValueError.
"""
return int(s.strip())
class DefaultTemplateFunctions(object):
"""A container class for the default functions provided to path
templates. These functions are contained in an object to provide
additional context to the functions -- specifically, the Item being
evaluated.
"""
_prefix = b'tmpl_'
def __init__(self, item=None, lib=None):
"""Parametrize the functions. If `item` or `lib` is None, then
some functions (namely, ``aunique``) will always evaluate to the
empty string.
"""
self.item = item
self.lib = lib
def functions(self):
"""Returns a dictionary containing the functions defined in this
object. The keys are function names (as exposed in templates)
and the values are Python functions.
"""
out = {}
for key in self._func_names:
out[key[len(self._prefix):]] = getattr(self, key)
return out
@staticmethod
def tmpl_lower(s):
"""Convert a string to lower case."""
return s.lower()
@staticmethod
def tmpl_upper(s):
"""Covert a string to upper case."""
return s.upper()
@staticmethod
def tmpl_title(s):
"""Convert a string to title case."""
return s.title()
@staticmethod
def tmpl_left(s, chars):
"""Get the leftmost characters of a string."""
return s[0:_int_arg(chars)]
@staticmethod
def tmpl_right(s, chars):
"""Get the rightmost characters of a string."""
return s[-_int_arg(chars):]
@staticmethod
def tmpl_if(condition, trueval, falseval=u''):
"""If ``condition`` is nonempty and nonzero, emit ``trueval``;
otherwise, emit ``falseval`` (if provided).
"""
try:
int_condition = _int_arg(condition)
except ValueError:
if condition.lower() == "false":
return falseval
else:
condition = int_condition
if condition:
return trueval
else:
return falseval
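    # In a path format this is invoked as, e.g. (illustrative):
    #   %if{$comp,Compilations/$album,$albumartist/$album}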
@staticmethod
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents.
"""
return unidecode(s)
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`.
"""
cur_fmt = beets.config['time_format'].get(unicode)
return time.strftime(fmt, time.strptime(s, cur_fmt))
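    # In a path format (illustrative): %time{$added,%Y} re-renders the item's
    # `added` timestamp, parsed with the configured time_format, as a year.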
def tmpl_aunique(self, keys=None, disam=None):
"""Generate a string that is guaranteed to be unique among all
        albums in the library that share the same set of keys. A field
        from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
return u''
if self.item.album_id is None:
return u''
memokey = ('aunique', keys, disam, self.item.album_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
keys = keys or 'albumartist album'
disam = disam or 'albumtype year label catalognum albumdisambig'
keys = keys.split()
disam = disam.split()
album = self.lib.get_album(self.item)
if not album:
# Do nothing for singletons.
self.lib._memotable[memokey] = u''
return u''
# Find matching albums to disambiguate with.
subqueries = []
for key in keys:
value = getattr(album, key)
subqueries.append(dbcore.MatchQuery(key, value))
albums = self.lib.albums(dbcore.AndQuery(subqueries))
        # If there's only one album matching these details, then do
# nothing.
if len(albums) == 1:
self.lib._memotable[memokey] = u''
return u''
# Find the first disambiguator that distinguishes the albums.
for disambiguator in disam:
# Get the value for each album for the current field.
disam_values = set([getattr(a, disambiguator) for a in albums])
# If the set of unique values is equal to the number of
# albums in the disambiguation set, we're done -- this is
# sufficient disambiguation.
if len(disam_values) == len(albums):
break
else:
            # No disambiguator field distinguished all the albums.
res = u' {0}'.format(album.id)
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = album.formatted(True).get(disambiguator)
res = u' [{0}]'.format(disam_value)
self.lib._memotable[memokey] = res
return res
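    # Typical path-format usage (illustrative): $albumartist/$album%aunique{}
    # renders e.g. u'Artist/Album [2001]' when the year disambiguates, or
    # u'Artist/Album 42' (the album id) when no disambiguator works.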
# Get the name of tmpl_* functions in the above class.
DefaultTemplateFunctions._func_names = \
[s for s in dir(DefaultTemplateFunctions)
if s.startswith(DefaultTemplateFunctions._prefix)]
| 33.545329
| 79
| 0.59006
|
e6a12b2e6c68621a3e3b887879b2c25c028fe370
| 893
|
py
|
Python
|
src/alert/forms.py
|
gettis/tlsscout
|
55dd5a1dbc3329aa451bfd82aac9a0f68d52136f
|
[
"BSD-3-Clause"
] | 9
|
2015-03-16T08:40:34.000Z
|
2020-10-13T15:15:38.000Z
|
src/alert/forms.py
|
gettis/tlsscout
|
55dd5a1dbc3329aa451bfd82aac9a0f68d52136f
|
[
"BSD-3-Clause"
] | 6
|
2015-03-22T19:32:52.000Z
|
2022-02-11T03:39:24.000Z
|
src/alert/forms.py
|
gettis/tlsscout
|
55dd5a1dbc3329aa451bfd82aac9a0f68d52136f
|
[
"BSD-3-Clause"
] | 8
|
2015-05-02T13:21:40.000Z
|
2020-09-30T17:59:49.000Z
|
from django import forms
from alert.models import SiteAlert, TagAlert, GroupAlert
### enable site alert form
class EnableSiteAlertForm(forms.ModelForm):
class Meta:
model = SiteAlert
fields = []
### disable site alert form
class DisableSiteAlertForm(forms.ModelForm):
class Meta:
model = SiteAlert
fields = []
### enable group alert form
class EnableGroupAlertForm(forms.ModelForm):
class Meta:
model = GroupAlert
fields = []
### disable group alert form
class DisableGroupAlertForm(forms.ModelForm):
class Meta:
model = GroupAlert
fields = []
### enable tag alert form
class EnableTagAlertForm(forms.ModelForm):
class Meta:
model = TagAlert
fields = []
### disable tag alert form
class DisableTagAlertForm(forms.ModelForm):
class Meta:
model = TagAlert
fields = []
| 22.325
| 56
| 0.661814
|
023dbfe184925ea30052781cdca7dacb1407d622
| 3,086
|
py
|
Python
|
python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py
|
Mattlk13/nnabla
|
09b7dfd03bd88366d1d1f6cc61492b42175e35e7
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py
|
Mattlk13/nnabla
|
09b7dfd03bd88366d1d1f6cc61492b42175e35e7
|
[
"Apache-2.0"
] | 1
|
2020-11-09T07:33:29.000Z
|
2020-11-09T07:33:29.000Z
|
python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py
|
Mattlk13/nnabla
|
09b7dfd03bd88366d1d1f6cc61492b42175e35e7
|
[
"Apache-2.0"
] | null | null | null |
import nnabla as nn
import numpy as np
from .identity import IdentityConverter
from .helpers import GraphInfo
class BatchNormalizationLinearConverter(IdentityConverter):
"""
    The parameters of the batch normalization are folded into a simple
    scale and bias.
Args:
black_list (list): Black list of the function list.
params (:obj:`OrderedDict`): Result of nn.get_parameters().
name (:obj:`str`): Prefix of the parameter scope.
"""
def __init__(self,
black_list=[], params=None,
name="bn-linear"):
super(BatchNormalizationLinearConverter, self).__init__(black_list,
params, name)
def convert(self, vroot, entry_variables):
"""
        Batch normalization functions (with `batch_stat` disabled) are
        replaced by a linear scale-and-bias computation; all other
        functions go through the identity conversion.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func.name == "BatchNormalization":
bn_func = func
# TODO: should deal with both?
if not bn_func.info.args["batch_stat"]:
o = self._bn_linear_conversion(bn_func, cnt)
cnt += 1
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable
def _bn_linear_conversion(self, bn_func, cnt):
# Conversion
eps_data = bn_func.info.args["eps"]
beta_data = np.squeeze(bn_func.inputs[1].d)
gamma_data = np.squeeze(bn_func.inputs[2].d)
mean_data = np.squeeze(bn_func.inputs[3].d)
var_data = np.squeeze(bn_func.inputs[4].d)
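        # Fold the affine batch-norm transform into a single scale and bias:
        #   y = gamma * (x - mean) / sqrt(var + eps) + beta
        #     = c0 * x + c1
        # with sigma = sqrt(var + eps), c0 = gamma / sigma, and
        # c1 = beta - gamma * mean / sigma, as computed below.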
sigma_data = np.sqrt(var_data + eps_data)
c0_data = gamma_data / sigma_data
c1_data = beta_data - (gamma_data * mean_data) / sigma_data
# Reshape
oshape = bn_func.inputs[1].shape
c0_data = c0_data.reshape(oshape)
c1_data = c1_data.reshape(oshape)
# Inputs
x = bn_func.inputs[0]
x = self.input_map[x] if x in self.input_map else x
c0 = nn.parameter.get_parameter_or_create("c0-{}-{}".format(self.name, cnt),
c0_data.shape, c0_data)
c1 = nn.parameter.get_parameter_or_create("c1-{}-{}".format(self.name, cnt),
c1_data.shape, c1_data)
# Function call
o = c0 * x + c1
# Map output of ref graph to output of new graph
x = bn_func.outputs[0]
self.input_map[x] = o
# Store output (just in case)
self.outputs.append(o)
return o
| 35.068182
| 95
| 0.567401
|
d2714f3870c3bf51d7052869716c06cecd6fab48
| 19,123
|
py
|
Python
|
bromelia/bromelia.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | null | null | null |
bromelia/bromelia.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | null | null | null |
bromelia/bromelia.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
bromelia.bromelia
~~~~~~~~~~~~~~~~~
This module contains the orchestrator to allow a seamless
Diameter application implementation.
:copyright: (c) 2020-present Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import logging
import multiprocessing
import sys
import threading
import time
from copy import copy
from types import SimpleNamespace
from ._internal_utils import _convert_file_to_config
from ._internal_utils import application_id_look_up
from .avps import *
from .base import DiameterAnswer
from .base import DiameterRequest
from .config import *
from .constants import *
from .exceptions import BromeliaException
from .setup import Diameter
from .utils import is_3xxx_failure
from .utils import is_4xxx_failure
from .utils import is_5xxx_failure
worker_logger = logging.getLogger("Worker")
bromelia_logger = logging.getLogger("Bromelia")
def get_application_string_by_id(application_id):
applications = [ application for application in globals().items() if "DIAMETER_APPLICATION_" in application[0] ]
for _string, _id in applications:
if _id == application_id:
return _string.split("DIAMETER_APPLICATION_")[1].lower()
def get_formatted_answer_as_per_request(answer, request):
answer.header.application_id = request.header.application_id
answer.header.hop_by_hop = request.header.hop_by_hop
answer.header.end_to_end = request.header.end_to_end
if request.has_avp("session_id_avp"):
answer.session_id_avp.data = request.session_id_avp.data
answer.refresh()
return answer
class Global(object):
"""Helper class to allow the creation of Bromelia attributes on the go. It
works as a namespace.
"""
pass
class PendingAnswer:
"""Helper class to allow the pending answer tracking of expected Diameter
Answers for a set of Diameter Requests. It supports the Bromelia class inner
workings.
"""
def __init__(self):
self.recv_event = threading.Event()
self.stop_event = threading.Event()
self.message = None
class Worker(multiprocessing.Process):
associations = dict()
recv_queues = list()
def __init__(self, app, manager):
multiprocessing.Process.__init__(self)
self.daemon = True
self.name = Worker.set_name(app.config["APPLICATIONS"])
self.app = app
self.is_open = manager.Event()
self.recv_queue = manager.Queue()
self.send_queue = manager.Queue()
self.send_event = manager.Event()
self.send_lock = manager.Lock()
self.pending_answers = dict()
self.update_associations()
self.update_recv_queues()
worker_logger.debug(f"Initializing Worker {self}")
@staticmethod
def set_name(applications):
name = ""
for application in applications:
app_name = application_id_look_up(application["app_id"])[0]
name += f'{app_name};'
return name[:-1].replace(" ", "_")
def update_associations(self):
for application in self.app.config["APPLICATIONS"]:
association = {application["app_id"]: self}
Worker.associations.update(association)
def update_recv_queues(self):
Worker.recv_queues.append(self.recv_queue)
def send_handler(self):
while True:
worker_logger.debug(f"[{self}] Awaiting someone sending a message")
self.send_event.wait()
if not self.send_queue.empty():
worker_logger.debug(f"[{self}] There is/are "\
f"{self.send_queue.qsize()} message(s) "\
f"available to be sent")
if self.send_queue.qsize() == 1:
outgoing_message = self.send_queue.get()
session_id = outgoing_message.session_id_avp.data
worker_logger.debug(f"[{self}][{session_id}] Sending to "\
f"endpoint")
self.app.send_message(outgoing_message)
self.send_event.clear()
self.send_lock.release()
elif self.send_queue.qsize() > 1:
outgoing_messages = list()
while not self.send_queue.empty():
outgoing_message = self.send_queue.get()
outgoing_messages.append(outgoing_message)
session_id = outgoing_message.session_id_avp.data
worker_logger.debug(f"[{self}][{session_id}] Sending "\
f"to endpoint")
self.app.send_messages(outgoing_messages)
self.send_event.clear()
self.send_lock.release()
def recv_handler(self):
while True:
worker_logger.debug(f"[{self}] Awaiting requests from "\
f"someone else")
incoming_message = self.app.get_message()
if incoming_message:
session_id = incoming_message.session_id_avp.data
worker_logger.debug(f"[{self}][{session_id}] Received "\
f"request")
self.recv_queue.put(incoming_message)
worker_logger.debug(f"[{self}][{session_id}] Putting request "\
f"into `recv_queue`")
#: it starts under worker.start() call
def run(self):
with self.app.context():
try:
while self.app.is_open():
self.is_open.set()
recv_thrd = threading.Thread(name="recv_handler",
target=self.recv_handler,
daemon=True)
send_thrd = threading.Thread(name="send_handler",
target=self.send_handler,
daemon=True)
recv_thrd.start()
send_thrd.start()
recv_thrd.join()
send_thrd.join()
except KeyboardInterrupt:
sys.exit(0)
self.is_open.clear()
def is_running(self):
return self.is_open.is_set()
class Bromelia:
"""The Bromelia object implements a WSGI-like application but for Diameter
protocol and acts as the central object. It will spin up one or more
processes by using the Worker class. The number of processes depends on the
number of Diameter interfaces needed for a given Diameter application.
Each process created by running Worker objects will represent a single
Diameter interface sending to / receiving byte stream from a Peer Node. Such
traffic will be proxied to the centralized process which holds the Bromelia
    object. The Bromelia object is responsible for processing either
    Diameter requests or Diameter answers according to the Diameter
    interface and properly forwarding them to the expected Diameter
    interface.
    It is strongly recommended, right after instantiating Bromelia, to
    define the set of Diameter messages to be used in each Diameter
    interface.
Usually you create a :class:`Bromelia` instance in your main module.
Usage::
>>> from bromelia import Bromelia
>>> app = Bromelia()
>>> app.run()
"""
def __init__(self, config_file=None):
bromelia_logger.debug(f"Initializing Bromelia application")
self.config_file = config_file
self.configs = _convert_file_to_config(self.config_file, globals())
self.routes = {}
self.sessions = {}
self.g = Global()
self.recv_queues = None
self.associations = None
self.request_threshold = threading.Barrier(parties=REQUEST_THRESHOLD)
self.request_id = 0
self.answer_threshold = threading.Barrier(parties=ANSWER_THRESHOLD)
self.answer_id = 0
self.send_threshold = threading.Barrier(parties=SEND_THRESHOLD)
def is_valid_session_key(self, session_key):
session = self.sessions.get(session_key, None)
if session is not None:
return True
return False
def _create_applications(self, debug, is_logging):
apps = list()
for config in self.configs:
app = Diameter(config=config,
debug=debug,
is_logging=is_logging)
apps.append(app)
return apps
def _run(self, debug, is_logging):
apps = self._create_applications(debug, is_logging)
        bromelia_logger.debug(f"Routes found: {self.routes}")
with multiprocessing.Manager() as manager:
for app in apps:
bromelia_logger.debug(f"Initializing Worker for app {app}")
worker = Worker(app, manager)
bromelia_logger.debug(f"Starting Worker for app {app}")
worker.start()
self.recv_queues = Worker.recv_queues
self.associations = Worker.associations
bromelia_logger.debug(f"Loading recv_queues: {self.recv_queues}")
bromelia_logger.debug(f"Loading associations: {self.associations}")
self.main()
def run(self, debug=False, is_logging=False, block=True):
threading.Thread(target=self._run,
args=(debug, is_logging)).start()
#: Wait spinning up Diameter objects in the connection layer processes
while True:
time.sleep(BROMELIA_LOADING_TICKER)
if self.associations is not None and self.recv_queues is not None:
break
#: Block til all Diameter objects are opened in the connection layer
#: processes - that is, connection are established
associations = copy(self.associations)
while block:
time.sleep(BROMELIA_LOADING_TICKER)
if not associations:
break
for key in list(associations.keys()):
association = associations[key]
if association.is_open.is_set():
associations.pop(key)
def main(self):
bromelia_logger.debug(f"Starting Main Bromelia Loop")
thrds = list()
while True:
time.sleep(BROMELIA_TICKER)
for recv_queue in self.recv_queues:
if not recv_queue.empty():
message = recv_queue.get()
if message.header.is_request():
thrd = threading.Thread(name=f"recv_request_{self.request_id}",
target=self.callback_route,
args=(message,))
self.request_id += 1
else:
thrd = threading.Thread(name=f"recv_answer_{self.answer_id}",
target=self.handler_pending_answers,
args=(message,))
self.answer_id += 1
thrd.start()
thrds.append(thrd)
for thrd in thrds:
if not thrd.is_alive():
thrd.join()
thrds.remove(thrd)
def route(self, application_id, command_code):
def outer_function(route_function):
def inner_function(*args, **kwargs):
if application_id not in self.routes:
_route = {application_id: {command_code: route_function}}
self.routes.update(_route)
else:
_command_code = {command_code: route_function}
self.routes[application_id].update(_command_code)
return inner_function()
return outer_function
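    # A hedged usage sketch (the application id and command code below are
    # hypothetical example values, not taken from this module). The callback
    # receives the request and must return a DiameterAnswer object:
    #   @app.route(application_id=16777251, command_code=316)
    #   def on_request(request):
    #       ...  # build and return a DiameterAnswer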
def handler_pending_answers(self, message):
try:
self.answer_threshold.wait(timeout=PROCESS_TIMER)
except threading.BrokenBarrierError:
self.answer_threshold.reset()
application_id = message.header.application_id
hop_by_hop = message.header.hop_by_hop
session_id = message.session_id_avp.data
worker = self.associations[application_id]
logging_info = f"[{worker}][{session_id}][HbH:{hop_by_hop.hex()}]"
bromelia_logger.debug(f"{logging_info} Check if it is an expected "\
f"answer")
if hop_by_hop in worker.pending_answers.keys():
bromelia_logger.debug(f"{logging_info} Found Hop-By-Hop in "\
f"Pending answer")
pending_answer = worker.pending_answers[hop_by_hop]
pending_answer.message = message
bromelia_logger.debug(f"{logging_info} Update Pending answer "\
f"with the received answer")
pending_answer.recv_event.set()
pending_answer.stop_event.wait()
worker.pending_answers.pop(message.header.hop_by_hop, None)
def get_error_answer_for_request(self, request):
application_id = request.header.application_id
config = self.associations[application_id].app.config
avps = [
SessionIdAVP(request.session_id_avp.data),
ResultCodeAVP(DIAMETER_UNABLE_TO_COMPLY),
OriginHostAVP(config["LOCAL_NODE_HOSTNAME"]),
OriginRealmAVP(config["LOCAL_NODE_REALM"]),
DestinationRealmAVP(request.origin_realm_avp.data),
DestinationHostAVP(request.origin_host_avp.data)
]
return DiameterAnswer(header=request.header, avps=avps)
def callback_route(self, request):
try:
self.request_threshold.wait(timeout=PROCESS_TIMER)
except threading.BrokenBarrierError:
self.request_threshold.reset()
application_id = request.header.application_id
command_code = request.header.command_code
session_id = request.session_id_avp.data
callback_function = self.routes[application_id][command_code]
bromelia_logger.debug(f"[{session_id}] Callback function has "\
f"been triggered: {callback_function}")
try:
answer = callback_function(request)
except Exception as e:
bromelia_logger.exception(f"[{session_id}] Error has been "\
f"raised in callback_function: {e.args}")
answer = None
if not isinstance(answer, DiameterAnswer):
bromelia_logger.exception(f"[{session_id}] There is no answer "\
f"processed to be sent. We are sending "\
f"UNABLE_TO_COMPLY")
error_answer = self.get_error_answer_for_request(request)
self.send_message(error_answer)
raise BromeliaException("Route function must return "\
"DiameterAnswer object")
answer = get_formatted_answer_as_per_request(answer, request)
if (is_3xxx_failure(answer) or
is_4xxx_failure(answer) or
is_5xxx_failure(answer)):
answer.header.set_error_bit(True)
if answer.has_avp("experimental_result_avp"):
if answer.has_avp("result_code_avp"):
answer.pop("result_code_avp")
bromelia_logger.debug(f"[{session_id}] Sending answer")
self.send_message(answer)
def send_message(self, message):
application_id = message.header.application_id
hop_by_hop = message.header.hop_by_hop
session_id = message.session_id_avp.data
worker = self.associations[application_id]
logging_info = f"[{worker}][{session_id}][HbH:{hop_by_hop.hex()}]"
bromelia_logger.debug(f"{logging_info} Application needs to send a "\
f"message")
if not worker.is_running():
bromelia_logger.debug(f"{logging_info} It seems the worker is "\
f"not running anymore")
return None
bromelia_logger.debug(f"{logging_info} Putting message into "\
f"`send_queue`")
try:
self.send_threshold.wait(timeout=SEND_THRESHOLD_TICKER)
except threading.BrokenBarrierError:
self.send_threshold.reset()
worker.send_lock.acquire()
worker.send_queue.put(message)
worker.send_event.set()
if message.header.is_request():
pending_answer = PendingAnswer()
worker.pending_answers.update({hop_by_hop: pending_answer})
bromelia_logger.debug(f"{logging_info} Added Pending answer")
pending_answer.recv_event.wait()
bromelia_logger.debug(f"{logging_info} Notification from "\
f"Pending answer")
pending_answer.recv_event.clear()
pending_answer.stop_event.set()
bromelia_logger.debug(f"{logging_info} Cleanup of Pending answer")
return pending_answer.message
def load_messages_into_application_id(self, messages, application_id):
def decorated_message(message):
def proxy(**attrs):
_config = None
for config in self.configs:
for application in config["APPLICATIONS"]:
if application["app_id"] == application_id:
_config = config
default_attrs = {
"origin_host": _config["LOCAL_NODE_HOSTNAME"],
"origin_realm": _config["LOCAL_NODE_REALM"],
}
attrs.update(**default_attrs)
peer_node_realm = _config["PEER_NODE_REALM"]
local_node_hostname = _config["LOCAL_NODE_HOSTNAME"]
if issubclass(message, DiameterRequest):
request_attrs = {"destination_realm": peer_node_realm}
attrs.update(**request_attrs)
if "session_id" in message.mandatory:
attrs.update({"session_id": local_node_hostname})
return message(**attrs)
return proxy
application_string = get_application_string_by_id(application_id)
self.__dict__.update({application_string: SimpleNamespace()})
for message in messages:
_name = [letter for letter in message.__name__ if letter.isupper()]
short_message_name = "".join(_name).upper()
setattr(self.__dict__[application_string],
short_message_name,
decorated_message(message))
| 34.769091
| 116
| 0.591434
|
81e258e2b3b01479c057b80c9edfd844c1edec90
| 1,314
|
py
|
Python
|
load.py
|
piersy/theano-machine-learning
|
5c6be6ef03de69dac8062d080c0af70ff427f83c
|
[
"MIT"
] | 1
|
2016-02-17T08:36:26.000Z
|
2016-02-17T08:36:26.000Z
|
load.py
|
piersy/theano-machine-learning
|
5c6be6ef03de69dac8062d080c0af70ff427f83c
|
[
"MIT"
] | null | null | null |
load.py
|
piersy/theano-machine-learning
|
5c6be6ef03de69dac8062d080c0af70ff427f83c
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
datasets_dir = './media/datasets/'
def one_hot(x, n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x), n))
o_h[np.arange(len(x)), x] = 1
return o_h
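# Quick sanity check for one_hot (expected output shown in the comment):
#   one_hot([0, 2], 3) -> array([[1., 0., 0.],
#                                [0., 0., 1.]])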
def mnist(ntrain=60000, ntest=10000, onehot=True):
data_dir = os.path.join(datasets_dir, 'mnist/')
    # Open the IDX files in binary mode; the slicing below skips the
    # 16-byte image headers and 8-byte label headers.
    fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)
    fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape((60000))
    fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28 * 28)).astype(float)
    fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape((10000))
trX = trX / 255.
teX = teX / 255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX, teX, trY, teY
| 26.28
| 64
| 0.599696
|
443156bb398c2d3e64c8bbb87f29e438502d053f
| 216
|
py
|
Python
|
models/ngi_models.py
|
rbaylon/ngi
|
97907dd687e15d35449f6cf850b6b37e379114ba
|
[
"BSD-3-Clause"
] | null | null | null |
models/ngi_models.py
|
rbaylon/ngi
|
97907dd687e15d35449f6cf850b6b37e379114ba
|
[
"BSD-3-Clause"
] | null | null | null |
models/ngi_models.py
|
rbaylon/ngi
|
97907dd687e15d35449f6cf850b6b37e379114ba
|
[
"BSD-3-Clause"
] | null | null | null |
from baseapp import db
class Chapter(db.Model):
id = db.Column(db.BigInteger, primary_key=True)
name = db.Column(db.String(100))
founder = db.Column(db.String(50))
founded = db.Column(db.String(10))
| 27
| 51
| 0.685185
|
62bcddfbe400085841ed4b1f7120a0ac85400623
| 6,372
|
py
|
Python
|
spot_micro_joy/scripts/spotMicroJoystickMove.py
|
luoyongh/spotMicro
|
2e33243c21342ab4159a59fe57ed7057adfcb0c5
|
[
"MIT"
] | null | null | null |
spot_micro_joy/scripts/spotMicroJoystickMove.py
|
luoyongh/spotMicro
|
2e33243c21342ab4159a59fe57ed7057adfcb0c5
|
[
"MIT"
] | null | null | null |
spot_micro_joy/scripts/spotMicroJoystickMove.py
|
luoyongh/spotMicro
|
2e33243c21342ab4159a59fe57ed7057adfcb0c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import rospy
from std_msgs.msg import Float32, Bool
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Twist
from math import pi
class SpotMicroJoystickControl():
BUTTON_IDLE = 0
BUTTON_WALK = 1
BUTTON_STAND = 2
BUTTON_PEE = 3
BUTTON_ANGLE = 4
ANGLE_AXES_ROLL = 0
ANGLE_AXES_HEIGHT = 1
ANGLE_AXES_YAW = 2
ANGLE_AXES_PITCH = 3
WALK_AXES_FORWARD = 1
WALK_AXES_STRAFE = 0
WALK_AXES_YAW = 2
MODE_IDLE = 0
MODE_STAND = 1
MODE_ANGLE = 2
MODE_WALK = 3
MODE_PEE = 4
MAX_ROLL_DEG = 45
MAX_YAW_DEG = 45
MAX_PATCH_DEG = 45
MAX_FORWARD_SPEED = 0.05
MAX_STRAFE_SPEED = 0.05
MAX_YAW_SPEED_DEG = 15
def __init__(self):
self._angle_cmd_msg = Vector3()
self._angle_cmd_msg.x = 0
self._angle_cmd_msg.y = 0
self._angle_cmd_msg.z = 0
self._vel_cmd_msg = Twist()
self._vel_cmd_msg.linear.x = 0
self._vel_cmd_msg.linear.y = 0
self._vel_cmd_msg.linear.z = 0
self._vel_cmd_msg.angular.x = 0
self._vel_cmd_msg.angular.y = 0
self._vel_cmd_msg.angular.z = 0
self._walk_event_cmd_msg = Bool()
self._walk_event_cmd_msg.data = True # Mostly acts as an event driven action on receipt of a true message
self._pee_event_cmd_msg = Bool()
self._pee_event_cmd_msg.data = True
self._stand_event_cmd_msg = Bool()
self._stand_event_cmd_msg.data = True
self._idle_event_cmd_msg = Bool()
self._idle_event_cmd_msg.data = True
rospy.loginfo("Setting Up the Spot Micro Joystick Control Node...")
# Set up and title the ros node for this code
rospy.init_node('spot_micro_joystick_control')
# Create publishers for commanding velocity, angle, and robot states
self._ros_pub_angle_cmd = rospy.Publisher('/angle_cmd', Vector3, queue_size=1)
self._ros_pub_vel_cmd = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._ros_pub_pee_cmd = rospy.Publisher('/pee_cmd', Bool, queue_size=1)
self._ros_pub_stand_cmd = rospy.Publisher('/stand_cmd', Bool, queue_size=1)
self._ros_pub_walk_cmd = rospy.Publisher('/walk_cmd', Bool, queue_size=1)
self._ros_pub_idle_cmd = rospy.Publisher('/idle_cmd', Bool, queue_size=1)
        rospy.loginfo("Joystick control node publishers correctly initialized")
def reset_all_motion_commands_to_zero(self):
'''Reset body motion cmd states to zero and publish zero value body motion commands'''
self._vel_cmd_msg.linear.x = 0
self._vel_cmd_msg.linear.y = 0
self._vel_cmd_msg.linear.z = 0
self._vel_cmd_msg.angular.x = 0
self._vel_cmd_msg.angular.y = 0
self._vel_cmd_msg.angular.z = 0
self._ros_pub_vel_cmd.publish(self._vel_cmd_msg)
def reset_all_angle_commands_to_zero(self):
'''Reset angle cmd states to zero and publish them'''
self._angle_cmd_msg.x = 0
self._angle_cmd_msg.y = 0
self._angle_cmd_msg.z = 0
self._ros_pub_angle_cmd.publish(self._angle_cmd_msg)
def on_joy(self, msg):
self.on_joy_buttons(msg.buttons)
self.on_joy_axes(msg.axes)
def on_joy_buttons(self, buttons):
if buttons[self.BUTTON_IDLE] == 1:
self._ros_pub_idle_cmd.publish(self._idle_event_cmd_msg)
rospy.loginfo('Idle command issued from joystick.')
self.mode = self.MODE_IDLE
elif buttons[self.BUTTON_STAND] == 1:
self._ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)
rospy.loginfo('Stand command issued from joystick.')
self.mode = self.MODE_STAND
elif buttons[self.BUTTON_ANGLE] == 1:
self.reset_all_angle_commands_to_zero()
rospy.loginfo('Entering joystick angle command mode.')
self.mode = self.MODE_ANGLE
elif buttons[self.BUTTON_WALK] == 1:
self.reset_all_angle_commands_to_zero()
self._ros_pub_walk_cmd.publish(self._walk_event_cmd_msg)
rospy.loginfo('Entering joystick walk command mode.')
self.mode = self.MODE_WALK
elif buttons[self.BUTTON_PEE] == 1:
self.reset_all_angle_commands_to_zero()
self._ros_pub_pee_cmd.publish(self._pee_event_cmd_msg)
rospy.loginfo('Entering joystick pee command mode.')
self.mode = self.MODE_PEE
def on_joy_axes(self, axes):
if self.mode == self.MODE_ANGLE:
self.on_joy_angle_mode(axes)
elif self.mode == self.MODE_WALK:
self.on_joy_walk_mode(axes)
def on_joy_walk_mode(self, axes):
self._vel_cmd_msg.linear.x = axes[self.WALK_AXES_FORWARD] * self.MAX_FORWARD_SPEED
self._vel_cmd_msg.linear.y = axes[self.WALK_AXES_STRAFE] * self.MAX_STRAFE_SPEED
self._vel_cmd_msg.angular.z = pi / 180 * axes[self.WALK_AXES_YAW] * self.MAX_YAW_SPEED_DEG
print('Cmd Values: x speed: %1.3f m/s, y speed: %1.3f m/s, yaw rate: %1.3f deg/s ' \
% (self._vel_cmd_msg.linear.x, self._vel_cmd_msg.linear.y, self._vel_cmd_msg.angular.z * 180 / pi))
self._ros_pub_vel_cmd.publish(self._vel_cmd_msg)
def on_joy_angle_mode(self, axes):
self._angle_cmd_msg.x = pi / 180 * axes[self.ANGLE_AXES_ROLL] * self.MAX_ROLL_DEG * -1
self._angle_cmd_msg.y = pi / 180 * axes[self.ANGLE_AXES_PITCH] * self.MAX_PATCH_DEG * -1
self._angle_cmd_msg.z = pi / 180 * axes[self.ANGLE_AXES_YAW] * self.MAX_YAW_DEG
print('Cmd Values: phi: %1.3f deg, theta: %1.3f deg, psi: %1.3f deg ' \
% (
self._angle_cmd_msg.x * 180 / pi, self._angle_cmd_msg.y * 180 / pi,
self._angle_cmd_msg.z * 180 / pi))
self._ros_pub_angle_cmd.publish(self._angle_cmd_msg)
def run(self):
print("green = idle")
print("yellow = stand")
print("blue = angle")
print("red = walk")
# Publish all body motion commands to 0
self.reset_all_motion_commands_to_zero()
rospy.Subscriber("/joy", Joy, self.on_joy)
rospy.spin()
if __name__ == "__main__":
smjc = SpotMicroJoystickControl()
smjc.run()
| 37.046512
| 114
| 0.658977
|
fb5447021aba4be2ec17a89e1cbd854e53bed59a
| 1,258
|
py
|
Python
|
opsi/modules/draw/fps.py
|
ntamer03/opensight
|
ae811ba203b2fd8804087a5a2f249ace5fae6842
|
[
"MIT"
] | null | null | null |
opsi/modules/draw/fps.py
|
ntamer03/opensight
|
ae811ba203b2fd8804087a5a2f249ace5fae6842
|
[
"MIT"
] | null | null | null |
opsi/modules/draw/fps.py
|
ntamer03/opensight
|
ae811ba203b2fd8804087a5a2f249ace5fae6842
|
[
"MIT"
] | null | null | null |
import datetime
from dataclasses import dataclass
import cv2
import numpy as np
from opsi.manager.manager_schema import Function
from opsi.util.cv import Mat
class FPS:
def __init__(self):
self._start = None
self._end = None
self._numFrames = 0
def start(self):
self._start = datetime.datetime.now()
return self
def end(self):
self._end = datetime.datetime.now()
def update(self):
self._numFrames += 1
def elapsed(self):
return (datetime.datetime.now() - self._start).total_seconds()
def fps(self):
return self._numFrames / self.elapsed()
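# Minimal FPS usage sketch (the loop body is hypothetical):
#   fps = FPS().start()
#   for frame in frames:
#       fps.update()
#   print(fps.fps())  # average frames per second since start()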
class DrawFPS(Function):
def on_start(self):
self.f = FPS()
self.f.start()
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
self.f.update()
fps_str = str(round(self.f.fps(), 1))
draw = np.copy(inputs.img.mat.img)
cv2.putText(
draw,
fps_str,
(30, 30),
cv2.FONT_HERSHEY_SIMPLEX,
1.0,
(255, 255, 255),
lineType=cv2.LINE_AA,
)
draw = Mat(draw)
return self.Outputs(img=draw)
| 20.290323
| 70
| 0.565183
|
588cbe3c352c3fd571d939ee5b492e449cd9add3
| 1,471
|
py
|
Python
|
tests/test_airways.py
|
RaphaelDELAIR/traffic
|
47591f39f83e22aff65ae06987bce238cd2dd353
|
[
"MIT"
] | 209
|
2018-06-29T10:55:27.000Z
|
2022-03-31T19:30:06.000Z
|
tests/test_airways.py
|
RaphaelDELAIR/traffic
|
47591f39f83e22aff65ae06987bce238cd2dd353
|
[
"MIT"
] | 151
|
2018-10-05T12:47:53.000Z
|
2022-02-20T14:47:57.000Z
|
tests/test_airways.py
|
RaphaelDELAIR/traffic
|
47591f39f83e22aff65ae06987bce238cd2dd353
|
[
"MIT"
] | 68
|
2018-12-19T13:42:34.000Z
|
2022-03-11T15:33:41.000Z
|
import pytest
from traffic.data import airways, eurofirs
def test_basic() -> None:
foo = airways["FOO"]
assert foo is None
l888 = airways["L888"]
assert l888 is not None
assert 2.8e6 < l888.project_shape().length < 3e6
def test_through_extent() -> None:
narak_airways = set(
a.name for a in airways.through("NARAK") if a.name.startswith("U")
)
assert narak_airways == {"UN859", "UN869", "UT122", "UY155", "UZ365"}
air_ext = airways.extent(eurofirs["LSAS"])
assert air_ext is not None
swiss_length = max(
a.project_shape().length for a in air_ext.through("DITON")
)
full_length = max(
a.project_shape().length for a in airways.through("DITON")
)
assert swiss_length < 1e6 < full_length
air_ext = airways.extent(eurofirs["LFBB"])
assert air_ext is not None
short_un871 = air_ext["UN871"]
assert short_un871 is not None
assert short_un871.navaids == [
"LARDA",
"RONNY",
"TOPTU",
"GONUP",
"TOU",
"GAI",
"MAKIL",
"DITEV",
"MEN",
]
assert len(short_un871["LARDA", "TOPTU"].shape.coords) == 3
assert len(short_un871["TOPTU", "LARDA"].shape.coords) == 3
with pytest.raises(RuntimeError):
short_un871["LARDA", "LARDA"]
with pytest.raises(ValueError):
short_un871["ERROR", "LARDA"]
with pytest.raises(ValueError):
short_un871["LARDA", "ERROR"]
| 24.932203
| 74
| 0.60843
|
2c822638f1c9eac0eb8a3118d7e3026af1737670
| 6,353
|
py
|
Python
|
tools/eval.py
|
ruyijidan/PaddleDetection
|
e47e826ed1e4cefcbe4b6bdb1f976c68faa91799
|
[
"Apache-2.0"
] | 2
|
2020-04-16T15:09:23.000Z
|
2020-05-18T04:49:48.000Z
|
tools/eval.py
|
o20157/PaddleDetection
|
8102aaad6ff1e0da181cf1db99189c80a0a533d1
|
[
"Apache-2.0"
] | null | null | null |
tools/eval.py
|
o20157/PaddleDetection
|
8102aaad6ff1e0da181cf1db99189c80a0a533d1
|
[
"Apache-2.0"
] | 2
|
2021-08-17T09:40:57.000Z
|
2021-12-29T03:25:26.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
sys.path.append(parent_path)
import paddle.fluid as fluid
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.data.reader import create_reader
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def main():
"""
Main evaluate function
"""
cfg = load_config(FLAGS.config)
merge_config(FLAGS.opt)
check_config(cfg)
    # check whether use_gpu=True is set while running the cpu version of paddlepaddle
check_gpu(cfg.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
main_arch = cfg.architecture
multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
# define executor
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# build program
model = create(main_arch)
startup_prog = fluid.Program()
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
inputs_def = cfg['EvalReader']['inputs_def']
feed_vars, loader = model.build_inputs(**inputs_def)
if multi_scale_test is None:
fetches = model.eval(feed_vars)
else:
fetches = model.eval(feed_vars, multi_scale_test)
eval_prog = eval_prog.clone(True)
reader = create_reader(cfg.EvalReader, devices_num=1)
loader.set_sample_list_generator(reader, place)
dataset = cfg['EvalReader']['dataset']
    # evaluate already-existing json files
if FLAGS.json_eval:
logger.info(
"In json_eval mode, PaddleDetection will evaluate json files in "
"output_eval directly. And proposal.json, bbox.json and mask.json "
"will be detected by default.")
json_eval_results(
cfg.metric, json_directory=FLAGS.output_eval, dataset=dataset)
return
compile_program = fluid.CompiledProgram(eval_prog).with_data_parallel()
assert cfg.metric != 'OID', "eval process of OID dataset \
is not supported."
if cfg.metric == "WIDERFACE":
raise ValueError("metric type {} does not support in tools/eval.py, "
"please use tools/face_eval.py".format(cfg.metric))
assert cfg.metric in ['COCO', 'VOC'], \
"unknown metric type {}".format(cfg.metric)
extra_keys = []
if cfg.metric == 'COCO':
extra_keys = ['im_info', 'im_id', 'im_shape']
if cfg.metric == 'VOC':
extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']
keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)
# whether output bbox is normalized in model output layer
is_bbox_normalized = False
if hasattr(model, 'is_bbox_normalized') and \
callable(model.is_bbox_normalized):
is_bbox_normalized = model.is_bbox_normalized()
sub_eval_prog = None
sub_keys = None
sub_values = None
# build sub-program
if 'Mask' in main_arch and multi_scale_test:
sub_eval_prog = fluid.Program()
with fluid.program_guard(sub_eval_prog, startup_prog):
with fluid.unique_name.guard():
inputs_def = cfg['EvalReader']['inputs_def']
inputs_def['mask_branch'] = True
feed_vars, eval_loader = model.build_inputs(**inputs_def)
sub_fetches = model.eval(
feed_vars, multi_scale_test, mask_branch=True)
assert cfg.metric == 'COCO'
extra_keys = ['im_id', 'im_shape']
sub_keys, sub_values, _ = parse_fetches(sub_fetches, sub_eval_prog,
extra_keys)
sub_eval_prog = sub_eval_prog.clone(True)
#if 'weights' in cfg:
# checkpoint.load_params(exe, sub_eval_prog, cfg.weights)
# load model
exe.run(startup_prog)
if 'weights' in cfg:
checkpoint.load_params(exe, startup_prog, cfg.weights)
resolution = None
if 'Mask' in cfg.architecture:
resolution = model.mask_head.resolution
results = eval_run(exe, compile_program, loader, keys, values, cls, cfg,
sub_eval_prog, sub_keys, sub_values, resolution)
#print(cfg['EvalReader']['dataset'].__dict__)
# evaluation
    # if map_type is not set, use the default '11point' (only used in VOC eval)
map_type = cfg.map_type if 'map_type' in cfg else '11point'
save_only = getattr(cfg, 'save_prediction_only', False)
eval_results(
results,
cfg.metric,
cfg.num_classes,
resolution,
is_bbox_normalized,
FLAGS.output_eval,
map_type,
dataset=dataset,
save_only=save_only)
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"--json_eval",
action='store_true',
default=False,
help="Whether to re eval with already exists bbox.json or mask.json")
parser.add_argument(
"-f",
"--output_eval",
default=None,
type=str,
help="Evaluation file directory, default is current directory.")
FLAGS = parser.parse_args()
main()
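# Typical invocation (paths are hypothetical; the -c/--config and -o/--opt
# flags are assumed to come from ppdet.utils.cli.ArgsParser):
#   python tools/eval.py -c configs/yolov3_darknet.yml -o weights=output/model_final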
| 35.099448
| 91
| 0.665198
|
c227b833814fc3e261cc96c4e13963b77e77ef73
| 5,191
|
py
|
Python
|
FileRoutes.py
|
lucassusanto/storage-service
|
24d1218dfd13f7876ae85f5b08ad2aef0b03b0f8
|
[
"MIT"
] | 2
|
2020-08-07T08:00:26.000Z
|
2021-08-29T18:25:10.000Z
|
FileRoutes.py
|
lucassusanto/storage-service
|
24d1218dfd13f7876ae85f5b08ad2aef0b03b0f8
|
[
"MIT"
] | null | null | null |
FileRoutes.py
|
lucassusanto/storage-service
|
24d1218dfd13f7876ae85f5b08ad2aef0b03b0f8
|
[
"MIT"
] | null | null | null |
from flask import request, jsonify
from flask_restful import Resource
import os
from AuthController import *
from FileController import *
class CreateFile(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result = file.createFile(data['file_path'], data['file_content'])
if result == 1:
return jsonify(error='file exists')
if result == 2:
return jsonify(error='quota is full')
return jsonify(msg='success')
except Exception as e:
return jsonify(error=str(e))
class DeleteFile(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result = file.deleteFile(data['file_path'])
if result == 1:
return jsonify(error='file does not exist')
return jsonify(msg='success')
except Exception as e:
return jsonify(error=str(e))
class OpenFile(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result, content = file.openFile(data['file_path'])
if result == 1:
return jsonify(error='file does not exist')
return jsonify(msg='success', file_content=content)
except Exception as e:
return jsonify(error=str(e))
class ListFile(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result, lists = file.listFile(data['folder_path'])
if result == 1:
return jsonify(error='directory does not exist')
return jsonify(msg='success', files=lists)
except Exception as e:
return jsonify(error=str(e))
class CreateDir(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result = file.createDir(data['folder_path'])
if result == 1:
return jsonify(error='directory exists')
return jsonify(msg='success')
except Exception as e:
return jsonify(error=str(e))
class DeleteDir(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result = file.deleteDir(data['folder_path'])
if result == 1:
return jsonify(error='directory does not exist')
return jsonify(msg='success')
except Exception as e:
return jsonify(error=str(e))
class ListDir(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result, lists = file.listDir(data['folder_path'])
if result == 1:
return jsonify(error='directory does not exist')
return jsonify(msg='success', dirs=lists)
except Exception as e:
return jsonify(error=str(e))
class MoveFile(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result = file.moveFile(data['src'], data['des'])
if result == 1:
return jsonify(error='source file does not exist')
if result == 2:
return jsonify(error='destination directory does not exist')
return jsonify(msg='success')
except Exception as e:
return jsonify(error=str(e))
class MoveDir(Resource):
def __init__(self):
token = request.headers.get('Authorization')
self.username = Auth_Controller().verifyToken(token)
def post(self):
if not self.username:
return jsonify(error='invalid token')
try:
data = request.get_json(force=True)
file = File_Controller(self.username)
result = file.moveDir(data['src'], data['des'])
if result == 1:
return jsonify(error='source or destination folder does not exist')
return jsonify(msg='success')
except Exception as e:
return jsonify(error=str(e))
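# A minimal registration sketch (hypothetical wiring, not part of this file):
# each Resource above is mounted on its own endpoint with flask_restful.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(CreateFile, '/file/create')
#   api.add_resource(OpenFile, '/file/open')
#   api.add_resource(MoveFile, '/file/move')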
| 23.703196
| 71
| 0.708149
|
c77c528215e6e0a6dc5e908b1242b426b4612cbf
| 477
|
py
|
Python
|
assistant/products/migrations/0004_auto_20200813_0203.py
|
kapiak/ware_prod
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
[
"MIT"
] | null | null | null |
assistant/products/migrations/0004_auto_20200813_0203.py
|
kapiak/ware_prod
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
[
"MIT"
] | null | null | null |
assistant/products/migrations/0004_auto_20200813_0203.py
|
kapiak/ware_prod
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.9 on 2020-08-13 02:03
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20200810_1252'),
]
operations = [
migrations.AlterField(
model_name='productvariant',
name='cost_price',
field=djmoney.models.fields.MoneyField(blank=True, decimal_places=4, max_digits=20, null=True),
),
]
| 23.85
| 107
| 0.647799
|
a4335caf120c9a37b6f29af9e12ab7293a2d552e
| 50
|
py
|
Python
|
agendamentos/__init__.py
|
ceb10n/agendamentos
|
91e1044cad825db763474cb47935a0c42509ab32
|
[
"MIT"
] | 1
|
2019-02-20T00:37:14.000Z
|
2019-02-20T00:37:14.000Z
|
agendamentos/__init__.py
|
ceb10n/agendamentos
|
91e1044cad825db763474cb47935a0c42509ab32
|
[
"MIT"
] | null | null | null |
agendamentos/__init__.py
|
ceb10n/agendamentos
|
91e1044cad825db763474cb47935a0c42509ab32
|
[
"MIT"
] | null | null | null |
from .app import create_app
from .config import *
| 16.666667
| 27
| 0.78
|
282a2d6fb8531223acb5bc3779eee120bf326767
| 3,131
|
py
|
Python
|
mergify_engine/exceptions.py
|
GuillaumeOj/mergify-engine
|
c11c05d2fa7db8e7195c57147f1c6a845009a718
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/exceptions.py
|
GuillaumeOj/mergify-engine
|
c11c05d2fa7db8e7195c57147f1c6a845009a718
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/exceptions.py
|
GuillaumeOj/mergify-engine
|
c11c05d2fa7db8e7195c57147f1c6a845009a718
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mergify_engine.clients import http
class MergifyNotInstalled(Exception):
pass
class RateLimited(Exception):
def __init__(self, countdown, raw_data):
self.countdown = countdown
self.raw_data = raw_data
class MergeableStateUnknown(Exception):
def __init__(self, ctxt):
self.ctxt = ctxt
RATE_LIMIT_RETRY_MIN = 3
BASE_RETRY_TIMEOUT = 60
NOT_ACCESSIBLE_REPOSITORY_MESSAGES = [
"Repository access blocked", # Blocked Github Account or Repo
"Resource not accessible by integration", # missing permission
]
MISSING_REPOSITORY_DATA_MESSAGE = "Sorry, there was a problem generating this diff. The repository may be missing relevant data."
def should_be_ignored(exception):
if isinstance(exception, http.HTTPClientSideError):
if (
exception.status_code == 403
and exception.message in NOT_ACCESSIBLE_REPOSITORY_MESSAGES
):
return True
elif (
exception.status_code == 422
and exception.message == MISSING_REPOSITORY_DATA_MESSAGE
):
return True
        # NOTE(sileht): a repository returns 404 for /pulls..., so we can't do much
elif exception.status_code == 404 and str(exception.request.url).endswith(
"/pulls"
):
return True
return False
def need_retry(exception): # pragma: no cover
if isinstance(exception, RateLimited):
        # NOTE(sileht): when we are close to the reset date, and since UTC time between us
        # and Github differs a bit, we can get a negative delta, so set a minimum for retrying
return max(exception.countdown, RATE_LIMIT_RETRY_MIN)
elif isinstance(exception, MergeableStateUnknown):
return BASE_RETRY_TIMEOUT
elif isinstance(exception, (http.RequestError, http.HTTPServerSideError)):
        # NOTE(sileht): We already retry locally with urllib3, so if we get here, Github
        # is in really bad shape...
return BASE_RETRY_TIMEOUT * 5
    # NOTE(sileht): Most of the time tokens are just temporarily invalid. Why?
    # No idea, ask Github...
elif isinstance(exception, http.HTTPClientSideError):
        # Bad creds or token expired, we can't really know
if exception.response.status_code == 401:
return BASE_RETRY_TIMEOUT
        # Rate limit or abuse detection mechanism, future events will be rate limited
# correctly by mergify_engine.utils.Github()
elif exception.response.status_code == 403:
return BASE_RETRY_TIMEOUT * 5
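# Hypothetical examples of the resulting backoff in seconds, derived from the
# constants above:
#   need_retry(RateLimited(1, {}))          -> 3    (RATE_LIMIT_RETRY_MIN)
#   need_retry(MergeableStateUnknown(ctxt)) -> 60   (BASE_RETRY_TIMEOUT)
#   an http.HTTPServerSideError             -> 300  (BASE_RETRY_TIMEOUT * 5)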
| 35.579545
| 129
| 0.695944
|
4e5119f1ad81b58dbc03b46d8c1d3bf29671715e
| 5,152
|
py
|
Python
|
timescans/calibration.py
|
tjlane/timescans
|
9b847ebc75602d0e5701656f7737ec6371c09f2b
|
[
"MIT"
] | null | null | null |
timescans/calibration.py
|
tjlane/timescans
|
9b847ebc75602d0e5701656f7737ec6371c09f2b
|
[
"MIT"
] | 1
|
2018-10-18T11:36:32.000Z
|
2018-10-18T11:36:32.000Z
|
timescans/calibration.py
|
tjlane/timescans
|
9b847ebc75602d0e5701656f7737ec6371c09f2b
|
[
"MIT"
] | 1
|
2020-12-13T00:31:42.000Z
|
2020-12-13T00:31:42.000Z
|
import os
import numpy as np
from matplotlib import pyplot as plt
def fit_errors(x, y, y_hat, bin_size):
"""
Compute errors for fit both 'locally' and 'globally'.
Specifically, the R^2 statistic is computed as usual (global).
Additionally, the data are binned across the domain (x > bin_size)
and for each bin an RMSE is computed.
Parameters
----------
x, y, y_hat : np.ndarray
Equal-length 1D arrays of the domain, range, and prediction
respectively.
bin_size : float
The resolution in which to bin `x` for local error computation.
Returns
-------
r_sq : float
The usual R^2 statistic.
rmes : np.ndarray
A 2D array of local errors. The first column is the bin center of
mass (mean of x's in the bin), the second column is the prediction
averaged across that bin (mean of y_hat's in the bin), the final
column is the RMSE for the prediction in that bin.
Example
-------
    >>> r_sq, rmes = fit_errors(x, y, y_hat, bin_size)
>>> plot(x,y,'.')
>>> plot(x,y_hat,'-')
>>> plot(rmes[:,0],rmes[:,1] - rmes[:,2],'k-')
>>> plot(rmes[:,0],rmes[:,1] + rmes[:,2],'k-')
"""
ssq = lambda x : np.sum(np.square(x))
# global R^2
ssres = ssq(y - y_hat)
sstot = ssq(y - np.mean(y))
r_sq = 1.0 - ssres / sstot
# per-bin RME
bins = np.arange(x.min(), x.max()+bin_size*2, bin_size)
assign = np.digitize(x, bins)
uq = np.unique(assign)
rmes = np.zeros((len(uq), 3))
# get all x's that fall in a bin & compute rme
for i,u in enumerate(uq):
idx = (assign == u) # index of pts in bin
rmes[i,0] = np.mean(x[idx])
rmes[i,1] = np.mean(y_hat[idx])
rmes[i,2] = np.sqrt(ssq( y[idx] - y_hat[idx] ) / np.sum(idx) )
return r_sq, rmes
def analyze_calibration_run(exp, run, las_delay_pvname, px_cutoffs=(200, 800)):
"""
Analyze a run where the timetool camera is fixed but the laser delay
changes by a known amount in order to calibrate the TT camera pixel-
time conversion.
"""
import psana
ds = psana.DataSource('exp=%s:run=%d:smd' % (exp, run))
las_dly = psana.Detector(las_delay_pvname, ds.env())
if exp[:3] == 'cxi':
tt_edge = psana.Detector('CXI:TTSPEC:FLTPOS', ds.env())
tt_famp = psana.Detector('CXI:TTSPEC:AMPL', ds.env())
tt_fwhm = psana.Detector('CXI:TTSPEC:FLTPOSFWHM', ds.env())
elif exp[:3] == 'xpp':
tt_edge = psana.Detector('XPP:TIMETOOL:FLTPOS', ds.env())
tt_famp = psana.Detector('XPP:TIMETOOL:AMPL', ds.env())
tt_fwhm = psana.Detector('XPP:TIMETOOL:FLTPOSFWHM', ds.env())
delay_pxl_data = []
for i,evt in enumerate(ds.events()):
print "analyzing event: %d\r" % (i+1),
# perform some checks on the fit amp and fwhm
if (tt_fwhm(evt) > 250.0) or (tt_fwhm(evt) < 50.0): continue
if (tt_famp(evt) < 0.03): continue
edge = tt_edge(evt)
if (px_cutoffs[0] <= edge) and (edge <= px_cutoffs[1]):
delay_pxl_data.append([ tt_edge(evt), las_dly(evt) ])
if i == 15000: break # debugging
delay_pxl_data = np.array(delay_pxl_data)
print "Analyzing in-range %d events" % delay_pxl_data.shape[0]
out_path = os.path.join(os.environ['HOME'],
'tt_calib_data_%s_r%d.txt' % (exp, run))
print "saving raw calibration data --> %s" % out_path
np.savetxt(out_path, delay_pxl_data)
# from docs >> fs_result = a + b*x + c*x^2, x is edge position
fit = np.polyfit(delay_pxl_data[:,0], delay_pxl_data[:,1], 2)
c, b, a = fit * 1000.0
p = np.poly1d(fit)
r_sq, rmes = fit_errors(delay_pxl_data[:,0], delay_pxl_data[:,1],
p(delay_pxl_data[:,0]), 50)
print "\nFIT RESULTS"
print "ps_result = a + b*x + c*x^2, x is edge position"
print "------------------------------------------------"
print "a = %.12f" % a
print "b = %.12f" % b
print "c = %.12f" % c
print "R^2 = %f" % r_sq
print "------------------------------------------------"
print "fit range (tt pixels): %d <> %d" % px_cutoffs
print "time range (ps): %f <> %f" % ( p(px_cutoffs[0]),
p(px_cutoffs[1]) )
print "------------------------------------------------"
x = np.linspace(delay_pxl_data[:,0].min(), delay_pxl_data[:,0].max(), 101)
# make a plot
plt.figure()
plt.plot(delay_pxl_data[:,0], delay_pxl_data[:,1], '.')
plt.plot(x, p(x),'r-')
plt.plot(rmes[:,0], rmes[:,1] - rmes[:,2]*3,'k-')
plt.plot(rmes[:,0], rmes[:,1] + rmes[:,2]*3,'k-')
plt.legend(['events', 'fit', '-3 $\sigma$', '+3 $\sigma$'])
plt.xlabel('Edge Position (pixels) [TTSPEC:FLTPOS]')
plt.ylabel('Laser Delay (ps) [%s]' % las_delay_pvname)
plt.xlim([0, 1024])
plt.show()
# push results upstream to DAQ config
# save results to calib dir
return
if __name__ == '__main__':
    analyze_calibration_run('cxii2415', 65, 'LAS:FS5:VIT:FS_TGT_TIME_DIAL')
| 30.485207
| 86
| 0.55823
|
ea36c5f4187ebe3bae5e93c5d8a1dd4bf296c226
| 4,453
|
py
|
Python
|
Spaceship/parameters/aerosonde_parameters_Gazebo.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2020-06-07T00:14:42.000Z
|
2020-06-07T00:14:42.000Z
|
Spaceship/parameters/aerosonde_parameters_Gazebo.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | null | null | null |
Spaceship/parameters/aerosonde_parameters_Gazebo.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2019-06-24T22:10:48.000Z
|
2019-06-24T22:10:48.000Z
|
import sys
sys.path.append('..')
import numpy as np
from tools.tools import Euler2Quaternion
######################################################################################
# Initial Conditions
######################################################################################
# Initial conditions for MAV
pn0 = 0. # initial north position
pe0 = 0. # initial east position
pd0 = -100.0 # initial down position
u0 = 25. # initial velocity along body x-axis
v0 = 0. # initial velocity along body y-axis
w0 = 0. # initial velocity along body z-axis
phi0 = 0. # initial roll angle
theta0 = 0. # initial pitch angle
psi0 = 0.0 # initial yaw angle
p0 = 0 # initial roll rate
q0 = 0 # initial pitch rate
r0 = 0 # initial yaw rate
Va0 = np.sqrt(u0**2+v0**2+w0**2)
# Quaternion State
e = Euler2Quaternion(phi0, theta0, psi0)
e0 = e.item(0)
e1 = e.item(1)
e2 = e.item(2)
e3 = e.item(3)
######################################################################################
# Physical Parameters
######################################################################################
mass = 13.5 #kg
Jx = 0.8244 #kg m^2
Jy = 1.135
Jz = 1.759
Jxz = 0.1204
S_wing = 0.55
b = 2.8956
c = 0.18994
S_prop = 0.202
rho = 1.2682
e = 0.9  # Oswald efficiency factor (rebinds e; the quaternion components above are already unpacked)
AR = (b**2) / S_wing
gravity = 9.8
######################################################################################
# Longitudinal Coefficients
######################################################################################
C_L_0 = 0.28
C_D_0 = 0.03
C_m_0 = -0.02338
C_L_alpha = 3.45
C_D_alpha = 0.3
C_m_alpha = -0.38
C_L_beta = 0.0
C_L_q = 0.0
C_D_q = 0.0
C_m_q = -3.6
C_L_delta_a = 0.0
C_L_delta_e = -0.36
C_L_delta_r = 0.0
C_D_delta_e = 0.0
C_m_delta_e = -0.5
M = 50.0
alpha0 = 0.18994
epsilon = 2.8956
C_D_p = 0.0437
######################################################################################
# Lateral Coefficients
######################################################################################
C_Y_0 = 0.0
C_ell_0 = 0.0
C_n_0 = 0.0
C_Y_beta = -0.98
C_ell_beta = -0.12
C_n_beta = 0.25
C_Y_p = 0.0
C_ell_p = -0.26
C_n_p = 0.022
C_Y_r = 0.0
C_ell_r = 0.14
C_n_r = -0.35
C_Y_delta_a = 0.0
C_ell_delta_a = 0.08
C_n_delta_a = 0.06
C_Y_delta_r = -0.017
C_ell_delta_r = 0.105
C_n_delta_r = -0.032
######################################################################################
# Propeller thrust / torque parameters (see addendum by McLain)
######################################################################################
C_prop = 1.0
S_prop = 0.0314  # note: redefines S_prop from the physical-parameters section above
k_motor = 20
kTp = 0.
kOmega = 0.
# Prop parameters
D_prop = 20*(0.0254) # prop diameter in m
# Motor parameters
K_V = 145. # from datasheet RPM/V
KQ = (1. / K_V) * 60. / (2. * np.pi) # KQ in N-m/A, V-s/rad
R_motor = 0.042 # ohms
i0 = 1.5 # no-load (zero-torque) current (A)
# Inputs
ncells = 12.
V_max = 3.7 * ncells # max voltage for specified number of battery cells
# Coefficients from prop_data fit
C_Q2 = -0.01664
C_Q1 = 0.004970
C_Q0 = 0.005230
C_T2 = -0.1079
C_T1 = -0.06044
C_T0 = 0.09357
######################################################################################
# Calculation Variables
######################################################################################
# gamma parameters pulled from page 36 (dynamics)
gamma = Jx * Jz - (Jxz**2)
gamma1 = (Jxz * (Jx - Jy + Jz)) / gamma
gamma2 = (Jz * (Jz - Jy) + (Jxz**2)) / gamma
gamma3 = Jz / gamma
gamma4 = Jxz / gamma
gamma5 = (Jz - Jx) / Jy
gamma6 = Jxz / Jy
gamma7 = ((Jx - Jy) * Jx + (Jxz**2)) / gamma
gamma8 = Jx / gamma
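# Worked sanity check using the inertias above (values rounded):
#   gamma  = 0.8244 * 1.759 - 0.1204**2 ≈ 1.4356
#   gamma3 = Jz / gamma ≈ 1.759 / 1.4356 ≈ 1.2252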
# C values defined on page 62
C_p_0 = gamma3 * C_ell_0 + gamma4 * C_n_0
C_p_beta = gamma3 * C_ell_beta + gamma4 * C_n_beta
C_p_p = gamma3 * C_ell_p + gamma4 * C_n_p
C_p_r = gamma3 * C_ell_r + gamma4 * C_n_r
C_p_delta_a = gamma3 * C_ell_delta_a + gamma4 * C_n_delta_a
C_p_delta_r = gamma3 * C_ell_delta_r + gamma4 * C_n_delta_r
C_r_0 = gamma4 * C_ell_0 + gamma8 * C_n_0
C_r_beta = gamma4 * C_ell_beta + gamma8 * C_n_beta
C_r_p = gamma4 * C_ell_p + gamma8 * C_n_p
C_r_r = gamma4 * C_ell_r + gamma8 * C_n_r
C_r_delta_a = gamma4 * C_ell_delta_a + gamma8 * C_n_delta_a
C_r_delta_r = gamma4 * C_ell_delta_r + gamma8 * C_n_delta_r
| 29.490066
| 86
| 0.473838
|
9645b387f8f24a9108932a9eb21d2a7aa641df46
| 1,063
|
py
|
Python
|
Applications/SlicerApp/Testing/Python/ScriptedModuleDiscoveryTest/ModuleD_WithFileDialog_WithoutWidget.py
|
TheInterventionCentre/NorMIT-Plan-App
|
765ed9a5dccc1cc134b65ccabe93fc132baeb2ea
|
[
"MIT"
] | null | null | null |
Applications/SlicerApp/Testing/Python/ScriptedModuleDiscoveryTest/ModuleD_WithFileDialog_WithoutWidget.py
|
TheInterventionCentre/NorMIT-Plan-App
|
765ed9a5dccc1cc134b65ccabe93fc132baeb2ea
|
[
"MIT"
] | null | null | null |
Applications/SlicerApp/Testing/Python/ScriptedModuleDiscoveryTest/ModuleD_WithFileDialog_WithoutWidget.py
|
TheInterventionCentre/NorMIT-Plan-App
|
765ed9a5dccc1cc134b65ccabe93fc132baeb2ea
|
[
"MIT"
] | null | null | null |
from slicer.ScriptedLoadableModule import *
SOMEVAR = 'D'
class ModuleD_WithFileDialog_WithoutWidget(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Module A"
self.parent.contributors = ["Jean-Christophe Fillion-Robin (Kitware)",]
self.parent.helpText = """
    This module allows testing the scripted module import.
"""
self.parent.acknowledgementText = """
Developed by Jean-Christophe Fillion-Robin, Kitware Inc.,
partially funded by NIH grant 3P41RR013218-12S1.
"""
def somevar(self):
return SOMEVAR
class DICOMFileDialog:
def __init__(self,qSlicerFileDialog):
self.qSlicerFileDialog = qSlicerFileDialog
qSlicerFileDialog.fileType = 'Foo Directory'
qSlicerFileDialog.description = 'Do something awesome with Foo'
qSlicerFileDialog.action = slicer.qSlicerFileDialog.Read
def execDialog(self):
pass
def isMimeDataAccepted(self):
self.qSlicerFileDialog.acceptMimeData(True)
def dropEvent(self):
pass
| 27.25641
| 75
| 0.742239
|
7f86d93a6fca2d20f5f27a475dee41ec7014d8e9
| 6,323
|
py
|
Python
|
Python code/QPA without SSL/TestingFunctions.py
|
wsherry/QuantumPasswordAuthentication
|
682751d28c3fdcd1166749621cbb0e9db5fc169c
|
[
"MIT"
] | 1
|
2021-10-21T21:56:31.000Z
|
2021-10-21T21:56:31.000Z
|
Python code/QPA without SSL/TestingFunctions.py
|
wsherry/QuantumPasswordAuthentication
|
682751d28c3fdcd1166749621cbb0e9db5fc169c
|
[
"MIT"
] | null | null | null |
Python code/QPA without SSL/TestingFunctions.py
|
wsherry/QuantumPasswordAuthentication
|
682751d28c3fdcd1166749621cbb0e9db5fc169c
|
[
"MIT"
] | 1
|
2022-02-03T22:42:43.000Z
|
2022-02-03T22:42:43.000Z
|
from qiskit import *
def get_programs_for_test(server, challenge_input, program, permutation_key, sp_list, sp_mit_single_list, sp_mit_all_list, dp_list, meas_filter_singles, meas_filter_alls, rev_cnots, backend, num_shots):
"""
Prepares circuits for execution
Parameters:
server (Server): Server instance
challenge_input (str): challenge point for testing programs
program (qiskit's QuantumCicuit): the encoded program for applying tests
permutation_key ([int]): permutation ordering
sp_list ([qiskit's QuantumCircuits]): list of prepared syndrome quantum circuits
sp_mit_single_list ([qiskit's QuantumCircuits]): list of prepared syndrome quantum circuits with partial measurement and single qubit patterns
        sp_mit_all_list ([qiskit's QuantumCircuits]): list of prepared quantum circuits with syndromes with partial measurement and single-qubit and qubit-pair patterns
        dp_list ([qiskit's QuantumCircuits]): list of prepared undone quantum circuits
meas_filter_singles ([qiskit's TensoredMeasFitter.filter]): list of tensored measurement filters for sp_mit_single_list circuits
meas_filter_alls ([qiskit's TensoredMeasFitter.filter]): list of tensored measurement filters for sp_mit_all_list circuits
rev_cnots ([[int,int]]): cnot gates to be applied for undoing the circuit
backend (qiskit's IBMQBackend): specified backend for preparing measurement filter
num_shots (int): number of shots for backend
Returns:
sp_list ([qiskit's QuantumCircuits]): list of prepared syndrome quantum circuits
sp_mit_single_list ([qiskit's QuantumCircuits]): list of prepared syndrome quantum circuits with partial measurement and single qubit patterns
        sp_mit_all_list ([qiskit's QuantumCircuits]): list of prepared quantum circuits with syndromes with partial measurement and single-qubit and qubit-pair patterns
        dp_list ([qiskit's QuantumCircuits]): list of prepared undone quantum circuits
meas_filter_singles ([qiskit's TensoredMeasFitter.filter]): list of tensored measurement filters for sp_mit_single_list circuits
meas_filter_alls ([qiskit's TensoredMeasFitter.filter]): list of tensored measurement filters for sp_mit_all_list circuits
mit_pattern_s ([[int]]): subset of single qubits used in tensored error mitigation, based on the circuits sp_mit_single_list
mit_pattern_all (list): subset of single and double qubits used in tensored error mitigation, based on the circuits sp_mit_all_list
"""
syndrome_program = server.get_syndrome_circuit(challenge_input, program)
mit_pattern_s, mit_val_s = server.get_random_mit_pattern_single()
mit_pattern_all, mit_val_all = server.get_random_mit_pattern_all(
permutation_key)
syndrome_program_mit_single = server.get_syndrome_circuit_mit_measures(
mit_val_s, challenge_input, program)
syndrome_program_mit_all = server.get_syndrome_circuit_mit_measures(
mit_val_all, challenge_input, program)
decoded_program = server.undo_circuit(
challenge_input, program, rev_cnots=rev_cnots)
meas_filter_s = server.prepare_meas_filter(
mit_pattern_s, backend, num_shots)
meas_filter_all = server.prepare_meas_filter(
mit_pattern_all, backend, num_shots)
sp_list = sp_list + [syndrome_program]
sp_mit_single_list = sp_mit_single_list + [syndrome_program_mit_single]
sp_mit_all_list = sp_mit_all_list + [syndrome_program_mit_all]
dp_list = dp_list + [decoded_program]
meas_filter_singles = meas_filter_singles + [meas_filter_s]
meas_filter_alls = meas_filter_alls + [meas_filter_all]
return sp_list, sp_mit_single_list, sp_mit_all_list, dp_list, meas_filter_singles, meas_filter_alls, mit_pattern_s, mit_pattern_all
def prepare_for_test(server, cnots):
"""
Prepare inputs for test
Parameters:
server (Server): instance of Server for preparing inputs
cnots ([[int,int]]): cnot gates to be applied
Returns:
p (str): point
k1 ([int]): permutation key
key2 ([[int],[int]]): one-time pad key
permuted_cnots([[int,int]]): cnot gates post permutation
permuted_hadamards ([int]): hadamard gates post permutation
x_key ([int]): all delegated pauli-X gates to be applied for one-time pad (key2)
z_key ([int]): all delegated pauli-Z gates to be applied for one-time pad (key2)
        data (list): qubits' initial states
"""
p, k1, k2 = server.generate_point()
key2 = [k2[0][:], k2[1][:]]
permuted_cnots, permuted_hadamards = server.permute_classical(k1, cnots)
rev = server.reverse_cnots(permuted_cnots)
x_key, z_key = server.get_OTP_classical_key(
k2, k1, rev, permuted_hadamards)
data = [0]*14
data[k1.index(7)] = '+'
return p, k1, key2, permuted_cnots, permuted_hadamards, x_key, z_key, data
def get_transpiled_circuit_and_depth(circuit_list, backend, init_qubits, opt_level, num_seeds):
"""
Gets the list of transpiled circuits with the least gate depths based on the random seeds of the specified quantum backend
Parameters:
circuit_list ([qiskit's QuantumCircuit]): list of circuits to be transpiled
backend (qiskit's IBMQBackend): specified quantum computer for transpiling the circuits
init_qubits ([int]): mapping of virtual to physical qubits
opt_level (int): the optimization level of the transpiled circuits
num_seeds (int): the number of random seeds to iterate through
Returns:
transpiled_list ([qiskit's QuantumCircuit]): transpiled circuits with the least gate depths
transpiled_depths ([int]): corresponding gate depths of transpiled_list
"""
transpiled_list = []
transpiled_depths = []
for i in range(len(circuit_list)):
min_circ = transpile(
circuit_list[i], backend, initial_layout=init_qubits[i])
min_depth = min_circ.depth()
for j in range(num_seeds):
transpiled_circ = transpile(
circuit_list[i], backend, initial_layout=init_qubits[i], optimization_level=opt_level)
depth = transpiled_circ.depth()
if depth < min_depth:
min_depth = depth
min_circ = transpiled_circ
transpiled_list.append(min_circ)
transpiled_depths.append(min_circ.depth())
return transpiled_list, transpiled_depths
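# A minimal usage sketch (names hypothetical): keep the shallowest of 10
# transpilations of a circuit `qc` on `backend`, with a trivial qubit layout.
#
#   circs, depths = get_transpiled_circuit_and_depth(
#       [qc], backend, [list(range(qc.num_qubits))], opt_level=3, num_seeds=10)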
| 54.508621
| 202
| 0.748537
|
e093bcfedae0d88f489e810b6a81191862ee9e30
| 1,058
|
py
|
Python
|
workers/jobs/master_nodes.py
|
thiagopena/grua
|
d8b56d2b205c092a06866fffa689f0d831d1b338
|
[
"Apache-2.0"
] | null | null | null |
workers/jobs/master_nodes.py
|
thiagopena/grua
|
d8b56d2b205c092a06866fffa689f0d831d1b338
|
[
"Apache-2.0"
] | null | null | null |
workers/jobs/master_nodes.py
|
thiagopena/grua
|
d8b56d2b205c092a06866fffa689f0d831d1b338
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from glom import glom, Literal
from common import CertsClass
def nodes_sync(master_id, master_address):
cert_instance = CertsClass(master_id)
cert_path = cert_instance.get_cert()
private_key_path = cert_instance.get_key()
req = requests.get(
f"https://{master_address}:8081/pdb/meta/v1/version",
verify=False,
cert=(cert_path, private_key_path),
)
if req.status_code == 404:
# PuppetDB <= 2.x
nodes_uri = f"https://{master_address}:8081/v4/nodes"
else:
# PuppetDB >= 3.x
nodes_uri = f"https://{master_address}:8081/pdb/query/v4/nodes"
req = requests.get(nodes_uri, verify=False, cert=(cert_path, private_key_path))
target = req.json()
spec = [{"certname": "certname", "master_zone": Literal(master_id)}]
user = os.environ.get("WEBAPP_USER", "")
password = os.environ.get("WEBAPP_PASS", "")
requests.post(
"http://webapp:8000/api/nodes/sync/",
json=glom(target, spec),
auth=(user, password),
)
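# A minimal usage sketch (IDs and hosts are hypothetical): pull the node list
# from one PuppetDB master and push it to the web app under that master's zone.
#
#   nodes_sync("master-01", "puppetdb.example.com")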
| 32.060606
| 83
| 0.647448
|
9c3e7eeb84884241c97e3a2684b33523a4072273
| 5,165
|
py
|
Python
|
baselines/common/vec_env/test_vec_env.py
|
rainwangphy/DeepRL
|
140dfa209514400de8981660c04eed9e7065a4b6
|
[
"MIT"
] | null | null | null |
baselines/common/vec_env/test_vec_env.py
|
rainwangphy/DeepRL
|
140dfa209514400de8981660c04eed9e7065a4b6
|
[
"MIT"
] | null | null | null |
baselines/common/vec_env/test_vec_env.py
|
rainwangphy/DeepRL
|
140dfa209514400de8981660c04eed9e7065a4b6
|
[
"MIT"
] | null | null | null |
"""
Tests for asynchronous vectorized environments.
"""
import gym
import numpy as np
import pytest
from .dummy_vec_env import DummyVecEnv
from .shmem_vec_env import ShmemVecEnv
from .subproc_vec_env import SubprocVecEnv
from baselines.common.tests.test_with_mpi import with_mpi
def assert_venvs_equal(venv1, venv2, num_steps):
"""
Compare two environments over num_steps steps and make sure
that the observations produced by each are the same when given
the same actions.
"""
assert venv1.num_envs == venv2.num_envs
assert venv1.observation_space.shape == venv2.observation_space.shape
assert venv1.observation_space.dtype == venv2.observation_space.dtype
assert venv1.action_space.shape == venv2.action_space.shape
assert venv1.action_space.dtype == venv2.action_space.dtype
try:
obs1, obs2 = venv1.reset(), venv2.reset()
assert np.array(obs1).shape == np.array(obs2).shape
assert np.array(obs1).shape == (venv1.num_envs,) + venv1.observation_space.shape
assert np.allclose(obs1, obs2)
venv1.action_space.seed(1337)
for _ in range(num_steps):
actions = np.array([venv1.action_space.sample() for _ in range(venv1.num_envs)])
for venv in [venv1, venv2]:
venv.step_async(actions)
outs1 = venv1.step_wait()
outs2 = venv2.step_wait()
for out1, out2 in zip(outs1[:3], outs2[:3]):
assert np.array(out1).shape == np.array(out2).shape
assert np.allclose(out1, out2)
assert list(outs1[3]) == list(outs2[3])
finally:
venv1.close()
venv2.close()
@pytest.mark.parametrize('klass', (ShmemVecEnv, SubprocVecEnv))
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
def test_vec_env(klass, dtype): # pylint: disable=R0914
"""
Test that a vectorized environment is equivalent to
DummyVecEnv, since DummyVecEnv is less likely to be
error prone.
"""
num_envs = 3
num_steps = 100
shape = (3, 8)
def make_fn(seed):
"""
Get an environment constructor with a seed.
"""
return lambda: SimpleEnv(seed, shape, dtype)
fns = [make_fn(i) for i in range(num_envs)]
env1 = DummyVecEnv(fns)
env2 = klass(fns)
assert_venvs_equal(env1, env2, num_steps=num_steps)
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
@pytest.mark.parametrize('num_envs_in_series', (3, 4, 6))
def test_sync_sampling(dtype, num_envs_in_series):
"""
Test that a SubprocVecEnv running with envs in series
outputs the same as DummyVecEnv.
"""
num_envs = 12
num_steps = 100
shape = (3, 8)
def make_fn(seed):
"""
Get an environment constructor with a seed.
"""
return lambda: SimpleEnv(seed, shape, dtype)
fns = [make_fn(i) for i in range(num_envs)]
env1 = DummyVecEnv(fns)
env2 = SubprocVecEnv(fns, in_series=num_envs_in_series)
assert_venvs_equal(env1, env2, num_steps=num_steps)
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
@pytest.mark.parametrize('num_envs_in_series', (3, 4, 6))
def test_sync_sampling_sanity(dtype, num_envs_in_series):
"""
Test that a SubprocVecEnv running with envs in series
outputs the same as SubprocVecEnv without running in series.
"""
num_envs = 12
num_steps = 100
shape = (3, 8)
def make_fn(seed):
"""
Get an environment constructor with a seed.
"""
return lambda: SimpleEnv(seed, shape, dtype)
fns = [make_fn(i) for i in range(num_envs)]
env1 = SubprocVecEnv(fns)
env2 = SubprocVecEnv(fns, in_series=num_envs_in_series)
assert_venvs_equal(env1, env2, num_steps=num_steps)
class SimpleEnv(gym.Env):
"""
An environment with a pre-determined observation space
and RNG seed.
"""
def __init__(self, seed, shape, dtype):
np.random.seed(seed)
self._dtype = dtype
self._start_obs = np.array(np.random.randint(0, 0x100, size=shape),
dtype=dtype)
self._max_steps = seed + 1
self._cur_obs = None
self._cur_step = 0
# this is 0xFF instead of 0x100 because the Box space includes
# the high end, while randint does not
self.action_space = gym.spaces.Box(low=0, high=0xFF, shape=shape, dtype=dtype)
self.observation_space = self.action_space
def step(self, action):
self._cur_obs += np.array(action, dtype=self._dtype)
self._cur_step += 1
done = self._cur_step >= self._max_steps
reward = self._cur_step / self._max_steps
return self._cur_obs, reward, done, {'foo': 'bar' + str(reward)}
def reset(self):
self._cur_obs = self._start_obs
self._cur_step = 0
return self._cur_obs
def render(self, mode=None):
raise NotImplementedError
@with_mpi()
def test_mpi_with_subprocvecenv():
shape = (2, 3, 4)
nenv = 1
venv = SubprocVecEnv([lambda: SimpleEnv(0, shape, 'float32')] * nenv)
ob = venv.reset()
venv.close()
assert ob.shape == (nenv,) + shape
| 32.28125
| 92
| 0.650532
|
3dc3266224b3f954ad2f78e5c61458ff98ad22fa
| 5,138
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_vrf.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_vrf.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_vrf.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Mikhail Yohman (@FragmentedPacket) <mikhail.yohman@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_vrf
short_description: Create, update or delete vrfs within NetBox
description:
- Creates, updates or removes vrfs from NetBox
notes:
- Tags should be defined as a YAML list
  - This should be run with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: '0.1.0'
extends_documentation_fragment:
- netbox.netbox.common
options:
data:
type: dict
description:
- Defines the vrf configuration
suboptions:
name:
description:
- The name of the vrf
required: true
type: str
rd:
description:
- The RD of the VRF. Must be quoted to pass as a string.
required: false
type: str
tenant:
description:
- The tenant that the vrf will be assigned to
required: false
type: raw
enforce_unique:
description:
- Prevent duplicate prefixes/IP addresses within this VRF
required: false
type: bool
import_targets:
description:
- Import targets tied to VRF
required: false
type: list
elements: str
version_added: 2.0.0
export_targets:
description:
- Export targets tied to VRF
required: false
type: list
elements: str
version_added: 2.0.0
description:
description:
- The description of the vrf
required: false
type: str
tags:
description:
- Any tags that the vrf may need to be associated with
required: false
type: list
elements: raw
custom_fields:
description:
- must exist in NetBox
required: false
type: dict
required: true
"""
EXAMPLES = r"""
- name: "Test NetBox modules"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create vrf within NetBox with only required information
netbox_vrf:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test VRF
state: present
- name: Delete vrf within netbox
netbox_vrf:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test VRF
state: absent
- name: Create vrf with all information
netbox_vrf:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test VRF
rd: "65000:1"
tenant: Test Tenant
enforce_unique: true
import_targets:
- "65000:65001"
export_targets:
- "65000:65001"
description: VRF description
tags:
- Schnozzberry
state: present
"""
RETURN = r"""
vrf:
description: Serialized object as created or already existent within NetBox
returned: success (when I(state=present))
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_ipam import (
NetboxIpamModule,
NB_VRFS,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
rd=dict(required=False, type="str"),
tenant=dict(required=False, type="raw"),
enforce_unique=dict(required=False, type="bool"),
import_targets=dict(required=False, type="list", elements="str"),
export_targets=dict(required=False, type="list", elements="str"),
description=dict(required=False, type="str"),
tags=dict(required=False, type="list", elements="raw"),
custom_fields=dict(required=False, type="dict"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_vrf = NetboxIpamModule(module, NB_VRFS)
netbox_vrf.run()
if __name__ == "__main__": # pragma: no cover
main()
| 26.760417
| 92
| 0.604905
|
4f57f589929ae035b505d822b74181d484718e85
| 6,416
|
py
|
Python
|
Examples/Python/models/contact_person.py
|
Duett-AS/DuettAPI
|
ffd2f6060d6cfcc62e18a1c3b00ba1db58d5d289
|
[
"MIT"
] | 4
|
2021-11-25T10:25:02.000Z
|
2021-12-01T09:37:55.000Z
|
Examples/Python/models/contact_person.py
|
Duett-AS/DuettAPI
|
ffd2f6060d6cfcc62e18a1c3b00ba1db58d5d289
|
[
"MIT"
] | null | null | null |
Examples/Python/models/contact_person.py
|
Duett-AS/DuettAPI
|
ffd2f6060d6cfcc62e18a1c3b00ba1db58d5d289
|
[
"MIT"
] | 2
|
2021-11-26T11:23:26.000Z
|
2021-12-09T10:44:34.000Z
|
# coding: utf-8
"""
OpenApi
##  _How to take the Duett api in use_ To be able to use the API, you must have an interaction key and a client key. Eksample curl: ```swift curl -X 'GET' \\ 'https://api.duett.no/article/v1 \\ -H 'accept: application/json' \\ -H 'X-Api-Integration-Key: 89ff1c42-9d90-435a-8a94-20207bc06e1a' \\ -H 'X-Api-Client-Key: 7896feb3-aaa2-4fd2-aaa2-c69de5fd1e5f' ``` ##### [Metode description][1] ##### [Filtering data in openApi][2] ### Use a code generator as to auto-generate api client: ##### [NSwagStudio](https://github.com/RicoSuter/NSwag/wiki/NSwagStudio) ##### [Visual studio add-in](https://marketplace.visualstudio.com/items?itemName=ChristianResmaHelle.ApiClientCodeGenerator) \\ ##### [Online code generator for many languages and versions](https://github.com/swagger-api/swagger-codegen) *** [1]: ../metods-help.html [2]: ../query-help.html # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ContactPerson(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'phone1': 'str',
'phone2': 'str',
'mobile': 'str',
'email': 'str'
}
attribute_map = {
'name': 'name',
'phone1': 'phone1',
'phone2': 'phone2',
'mobile': 'mobile',
'email': 'email'
}
def __init__(self, name=None, phone1=None, phone2=None, mobile=None, email=None): # noqa: E501
"""ContactPerson - a model defined in Swagger""" # noqa: E501
self._name = None
self._phone1 = None
self._phone2 = None
self._mobile = None
self._email = None
self.discriminator = None
self.name = name
if phone1 is not None:
self.phone1 = phone1
if phone2 is not None:
self.phone2 = phone2
if mobile is not None:
self.mobile = mobile
if email is not None:
self.email = email
@property
def name(self):
"""Gets the name of this ContactPerson. # noqa: E501
:return: The name of this ContactPerson. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ContactPerson.
:param name: The name of this ContactPerson. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def phone1(self):
"""Gets the phone1 of this ContactPerson. # noqa: E501
:return: The phone1 of this ContactPerson. # noqa: E501
:rtype: str
"""
return self._phone1
@phone1.setter
def phone1(self, phone1):
"""Sets the phone1 of this ContactPerson.
:param phone1: The phone1 of this ContactPerson. # noqa: E501
:type: str
"""
self._phone1 = phone1
@property
def phone2(self):
"""Gets the phone2 of this ContactPerson. # noqa: E501
:return: The phone2 of this ContactPerson. # noqa: E501
:rtype: str
"""
return self._phone2
@phone2.setter
def phone2(self, phone2):
"""Sets the phone2 of this ContactPerson.
:param phone2: The phone2 of this ContactPerson. # noqa: E501
:type: str
"""
self._phone2 = phone2
@property
def mobile(self):
"""Gets the mobile of this ContactPerson. # noqa: E501
:return: The mobile of this ContactPerson. # noqa: E501
:rtype: str
"""
return self._mobile
@mobile.setter
def mobile(self, mobile):
"""Sets the mobile of this ContactPerson.
:param mobile: The mobile of this ContactPerson. # noqa: E501
:type: str
"""
self._mobile = mobile
@property
def email(self):
"""Gets the email of this ContactPerson. # noqa: E501
:return: The email of this ContactPerson. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this ContactPerson.
:param email: The email of this ContactPerson. # noqa: E501
:type: str
"""
self._email = email
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ContactPerson, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContactPerson):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.703704
| 970
| 0.566552
|
c5cda4d32ae5321b9527eab418181bc1bc11c540
| 6,611
|
py
|
Python
|
src/currency_core.py
|
Benys94/currency_converter
|
d5762c1446fda370e9801988d227ef2f02d00747
|
[
"MIT"
] | null | null | null |
src/currency_core.py
|
Benys94/currency_converter
|
d5762c1446fda370e9801988d227ef2f02d00747
|
[
"MIT"
] | null | null | null |
src/currency_core.py
|
Benys94/currency_converter
|
d5762c1446fda370e9801988d227ef2f02d00747
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. Created on 2018-07-15
.. codeauthor:: David Benes <benys94@gmail.com>
"""
import requests
import simplejson
import sys
from os.path import join, abspath, dirname, pardir
from requests.exceptions import RequestException
from simplejson.errors import JSONDecodeError
sys.path.append(join(abspath(dirname(__file__)), pardir))
from src.common_utils import load_json_file
class APIRequestError(Exception):
"""
Exception for handling error during HTTP requests
"""
class CurrencyConversionError(Exception):
"""
Exception raised when currency given by user is invalid
"""
class CurrencyConverter:
"""
Class for currency conversion.
It handles all kind of currency operations.
"""
CURRENCIES_FILE = join(
abspath(dirname(__file__)), pardir, "data", "currencies.json"
)
def __init__(self, currencies_json=None):
self._rates_api_address = "https://ratesapi.io/api/latest"
# Set path to .json file with currencies and their symbols
json_filepath = self.CURRENCIES_FILE
if currencies_json is not None:
json_filepath = currencies_json
# Load data from file that contains JSON with all supported
# currencies and their symbols
try:
currencies_data = load_json_file(filepath=json_filepath)
except FileNotFoundError:
raise CurrencyConversionError(
"File '%s' with currency symbols doesn't exist" % json_filepath
)
except JSONDecodeError:
raise CurrencyConversionError(
"Invalid JSON in file '%s'" % json_filepath
)
else:
self._mapped_currencies = map_symbols_to_currencies(
currencies_data
)
# Try to get currency for given symbol. If symbol isn't found
# given value itself is returned as currency
self.symbol_to_currency = lambda x: self._mapped_currencies.get(x, x)
def send_api_request(self, payload=None):
"""
Get data from currency API service.
        The API returns data in JSON, but since we can never know what might
        go wrong, a plain string is also acceptable.
:param dict payload:
Parameters to be included into HTTP request
:return: Response data
:rtype: dict|str
:raises APIRequestError:
If there's any problem with API response
"""
try:
api_response = requests.get(
self._rates_api_address,
params=payload
)
except RequestException:
raise APIRequestError(
"Communication with ratesapi.io service has failed"
)
# Decode response and check for errors
response_data = decode_response_message(api_response)
if not isinstance(response_data, dict):
raise APIRequestError(response_data)
return response_data
def convert_currency(
self, currency_amount, input_currency, output_currency
):
"""
Convert amount of currency according to passed arguments.
:param float currency_amount:
Amount of currency to be converted
:param str input_currency:
Type of input currency (e.g. 'USD', 'EUR', '$', ...)
:param str output_currency:
Type of output currency (e.g. 'USD', 'EUR', '$', ...)
:return:
Tuple with base currency and dictionary with conversion results
:rtype: tuple(str, dict)
"""
req_payload = {
"base": self.symbol_to_currency(input_currency)
}
if output_currency is not None:
req_payload["symbols"] = self.symbol_to_currency(output_currency)
currency_rates = self.send_api_request(req_payload)
if "error" in currency_rates:
raise CurrencyConversionError(currency_rates["error"])
elif not currency_rates["rates"]:
raise CurrencyConversionError("Invalid output currency")
convert_results = count_currency_amounts(
currency_amount, currency_rates["rates"]
)
return req_payload["base"], convert_results
def count_currency_amounts(amount, currency_rates):
"""
Count results for all possible rates according to base currency.
:param float amount:
Amount of currency to be converted
:param dict currency_rates:
Dictionary with currency rates
{"<currency>": <rate>, ...}
:return:
Dictionary with all output currencies and computed values
:rtype: dict{'EUR': 34.55, 'USD': 45.28, ...}
"""
convert_results = {}
for currency, rate in currency_rates.items():
convert_results[currency] = round(rate * amount, 2)
return convert_results
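# Hypothetical worked example: converting 10 units with rates
# {"EUR": 0.9, "USD": 1.1} yields {"EUR": 9.0, "USD": 11.0}.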
def map_symbols_to_currencies(currencies):
"""
Create dictionary where key is symbol of currency and value is
currency itself
:param list currencies:
List of dictionaries with data about many currencies
:return: Dictionary with symbols and currencies
:rtype: dict
:raises KeyError: When given argument has wrong format
"""
result_dict = {}
for currency_dict in currencies:
result_dict[currency_dict["symbol"]] = currency_dict["cc"]
return result_dict
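# Hypothetical worked example:
#   map_symbols_to_currencies([{"symbol": "$", "cc": "USD"}])  ->  {"$": "USD"}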
def decode_response_message(api_response):
"""
Decode message received from API.
    The message should be in JSON format, but some problems (e.g. a 404
    error) may return only a plain text response.
    :param requests.models.Response api_response:
        Response from the currency service API
:return:
Dictionary with decoded JSON.
        If the JSON couldn't be decoded, the plain text response is returned.
:rtype: dict|str
"""
try:
response_msg = api_response.json()
except JSONDecodeError:
return api_response.text
else:
return response_msg
def output_formatter(conversion_results, amount):
"""
Build output dictionary from computed data.
:param tuple conversion_results:
Tuple with base currency and dictionary
with conversion results.
:param float amount:
Amount of currency to convert
:return: Output string
:rtype: str
"""
base_currency, conversions = conversion_results
output_d = {
"input": {
"amount": amount,
"currency": base_currency
},
"output": conversions
}
return simplejson.dumps(output_d, indent=4)
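# Hypothetical output for output_formatter(("EUR", {"USD": 11.0}), 10.0),
# given the indent=4 used above:
# {
#     "input": {
#         "amount": 10.0,
#         "currency": "EUR"
#     },
#     "output": {
#         "USD": 11.0
#     }
# }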
| 30.606481
| 79
| 0.64665
|
0acc6bc8a6112a274b96396e47a252f60656f901
| 2,883
|
py
|
Python
|
python3/koans/about_method_bindings.py
|
BrianLoveGa/python_koans
|
1d1f8647d0882e2f8c3cbb8e209ff70d3c1dc279
|
[
"MIT"
] | null | null | null |
python3/koans/about_method_bindings.py
|
BrianLoveGa/python_koans
|
1d1f8647d0882e2f8c3cbb8e209ff70d3c1dc279
|
[
"MIT"
] | null | null | null |
python3/koans/about_method_bindings.py
|
BrianLoveGa/python_koans
|
1d1f8647d0882e2f8c3cbb8e209ff70d3c1dc279
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def function():
return "pineapple"
def function2():
return "tractor"
class Class:
def method(self):
return "parrot"
class AboutMethodBindings(Koan):
def test_methods_are_bound_to_an_object(self):
obj = Class()
self.assertEqual(True, obj.method.__self__ == obj)
def test_methods_are_also_bound_to_a_function(self):
obj = Class()
self.assertEqual('parrot', obj.method())
self.assertEqual('parrot', obj.method.__func__(obj))
def test_functions_have_attributes(self):
obj = Class()
self.assertEqual(35, len(dir(function)))
self.assertEqual(True, dir(function) == dir(obj.method.__func__))
def test_methods_have_different_attributes(self):
obj = Class()
self.assertEqual(27, len(dir(obj.method)))
def test_setting_attributes_on_an_unbound_function(self):
function.cherries = 3
self.assertEqual(3, function.cherries)
def test_setting_attributes_on_a_bound_method_directly(self):
obj = Class()
with self.assertRaises(AttributeError):
obj.method.cherries = 3
def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
obj = Class()
obj.method.__func__.cherries = 3
self.assertEqual(3, obj.method.cherries)
def test_functions_can_have_inner_functions(self):
function2.get_fruit = function
self.assertEqual('pineapple', function2.get_fruit())
def test_inner_functions_are_unbound(self):
function2.get_fruit = function
with self.assertRaises(AttributeError):
cls = function2.get_fruit.__self__
# ------------------------------------------------------------------
class BoundClass:
def __get__(self, obj, cls):
return (self, obj, cls)
binding = BoundClass()
def test_get_descriptor_resolves_attribute_binding(self):
bound_obj, binding_owner, owner_type = self.binding
# Look at BoundClass.__get__():
# bound_obj = self
# binding_owner = obj
# owner_type = cls
self.assertEqual('BoundClass', bound_obj.__class__.__name__)
self.assertEqual('AboutMethodBindings',
binding_owner.__class__.__name__)
self.assertEqual(AboutMethodBindings, owner_type)
# ------------------------------------------------------------------
class SuperColor:
def __init__(self):
self.choice = None
def __set__(self, obj, val):
self.choice = val
color = SuperColor()
def test_set_descriptor_changes_behavior_of_attribute_assignment(self):
self.assertEqual(None, self.color.choice)
self.color = 'purple'
self.assertEqual('purple', self.color.choice)
| 30.670213
| 81
| 0.63094
|