Dataset schema (one row per source file):

| Column | Type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4 to 1.02M) |
| ext | string (8 distinct values) |
| lang | string (1 distinct value) |
| max_stars_repo_path | string (length 4 to 209) |
| max_stars_repo_name | string (length 5 to 121) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list (length 1 to 10) |
| max_stars_count | int64 (1 to 191k), nullable |
| max_stars_repo_stars_event_min_datetime | string (length 24), nullable |
| max_stars_repo_stars_event_max_datetime | string (length 24), nullable |
| max_issues_repo_path | string (length 4 to 209) |
| max_issues_repo_name | string (length 5 to 121) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list (length 1 to 10) |
| max_issues_count | int64 (1 to 67k), nullable |
| max_issues_repo_issues_event_min_datetime | string (length 24), nullable |
| max_issues_repo_issues_event_max_datetime | string (length 24), nullable |
| max_forks_repo_path | string (length 4 to 209) |
| max_forks_repo_name | string (length 5 to 121) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list (length 1 to 10) |
| max_forks_count | int64 (1 to 105k), nullable |
| max_forks_repo_forks_event_min_datetime | string (length 24), nullable |
| max_forks_repo_forks_event_max_datetime | string (length 24), nullable |
| content | string (length 4 to 1.02M) |
| avg_line_length | float64 (1.07 to 66.1k) |
| max_line_length | int64 (4 to 266k) |
| alphanum_fraction | float64 (0.01 to 1) |
hexsha: f7366fb79cc98cc8da4a20593adf754830266e3b | size: 6,539 | ext: py | lang: Python
path: bibpy/lexers/base_lexer.py | repo: MisanthropicBit/bibpy | head hexsha: 3195790105544672f622ed893213a627b5280f2b | licenses: ["BSD-3-Clause"]
stars: 1 (2021-08-18T13:17:10.000Z to 2021-08-18T13:17:10.000Z) | issues: 7 (2018-02-18T12:29:20.000Z to 2020-05-14T18:08:48.000Z) | forks: 3 (2018-02-17T18:27:43.000Z to 2022-01-20T02:28:58.000Z)
content:
# -*- coding: utf-8 -*-
"""Base class for all lexers."""
import re
from funcparserlib.lexer import Token
class LexerError(Exception):
"""General lexer error."""
def __init__(self, msg, pos, char, lnum, brace_level, line):
"""Initialise with information on where the error occurred."""
self.msg = msg
self.pos = pos
self.char = char
self.lnum = lnum
self.brace_level = brace_level
self.line = line
def __str__(self):
return "Failed at line {0}, char '{1}', position {2}, "\
"brace level {3}: {4} (line: '{5}')"\
.format(
self.lnum,
self.char,
self.pos,
self.brace_level,
self.msg,
self.line,
)
class BaseLexer:
"""Base class for all bibpy lexers."""
def __init__(self):
"""Initialise the lexer."""
self._modes = {}
self._patterns = None
def reset(self, string):
"""Reset the internal state of the lexer."""
self.pos = 0
self.lastpos = 0
self.maxpos = len(string)
self.char = 1
self.lnum = 1
self.last_lnum = 1
self.brace_level = 0
self.ignore_whitespace = False
self.string = string
def _compile_regexes(self, patterns):
"""Compile a set of patterns into regular expressions."""
        # Save a copy of the patterns that respects the order. We could also
        # use a collections.OrderedDict, but this actually affected performance
        # ever so slightly
self._iter_patterns = [
(name, (re.compile(pattern), f)) for name, (pattern, f) in patterns
]
# This is used for lookups
self._patterns = dict(self._iter_patterns)
@property
def patterns(self):
"""All patterns recognised by the lexer."""
return self._patterns
@property
def mode(self):
"""Return the current mode of the lexer."""
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
@property
def modes(self):
"""Return all modes that the lexer has."""
return self._modes
@property
def eos(self):
"""Return True if we have reached the end of the string."""
return self.pos >= self.maxpos
@property
def current_char(self):
"""Return the current character or None if no such character."""
if self.string and self.pos >= 0 and not self.eos:
return self.string[self.pos]
return None
def advance(self, match):
"""Advance the internal state based on a successful match."""
self.lastpos = self.pos
self.last_lnum = self.lnum
matched = match.group(0)
newlines = matched.count('\n')
self.pos = match.start(0) + len(matched)
self.lnum += newlines
if newlines == 0:
self.char += len(matched)
else:
self.char = len(matched) - matched.rfind('\n') - 1
def raise_error(self, msg):
"""Raise a lexer error with the given message."""
errline = self.string.splitlines()[self.lnum - 1]
raise LexerError(
msg, self.pos, self.char, self.lnum, self.brace_level, errline
)
def raise_unexpected(self, token):
"""Raise an error for an unexpected token."""
self.raise_error("Did not find expected token '{0}'".format(token))
def raise_unbalanced(self):
"""Raise an error for unbalanced braces."""
self.raise_error('Unbalanced braces')
def expect(self, token, strip_whitespace=True):
"""Expect a token, fail otherwise."""
pattern, _ = self.patterns[token]
m = pattern.search(self.string, self.pos)
if not m:
self.raise_unexpected(token)
self.advance(m)
token_value = m.group(0)
if self.ignore_whitespace:
token_value = token_value.strip()
return self.make_token(token, token_value)
def until(self, token):
"""Scan until a particular token is found.
Return the part of the string that was scanned past and the string
value of the token. The latter is the entire rest of the string if the
token was not found.
"""
if token == 'braces':
pattern = re.compile(r'{|}')
elif token == 'parens':
pattern = re.compile(r'\(|\)')
else:
pattern, _ = self.patterns[token]
m = pattern.search(self.string, self.pos)
if m:
scanned = m.group(0)
self.advance(m)
return self.string[self.lastpos:self.pos - 1], scanned
else:
rest = self.string[self.pos:]
self.pos = len(self.string)
return rest, ''
def make_token(self, token_type, value):
"""Create a token type with a value."""
return Token(
token_type,
value,
(self.last_lnum, self.lastpos),
(self.lnum, self.pos)
)
def lex_string(self, value):
"""Lex a string and return a single token for it."""
return self.make_token('string', value)
def scan(self, search_type='search'):
"""Scan until any token recognised by this lexer is found.
Return the part of the string that was scanned past and the token
itself. The latter is the entire rest of the string if the token was
not found.
"""
for token_type, (pattern, handler) in self._iter_patterns:
            # Not the most elegant but re.Pattern only exists in Python 3.7+ so
            # we cannot pass the method as an argument
m = getattr(pattern, search_type)(self.string, self.pos)
if m:
self.advance(m)
value = m.group(0)
if self.ignore_whitespace and token_type == 'space':
break
token = handler(value) if handler else\
self.make_token(token_type, value)
yield self.string[self.lastpos:self.pos - len(value)], token
break
else:
rest = self.string[self.pos:]
self.pos = len(self.string)
yield rest, None
def lex(self, string):
"""Lex a string and generate tokens."""
self.reset(string)
while not self.eos:
yield from self.modes[self.mode]()
avg_line_length: 29.32287 | max_line_length: 79 | alphanum_fraction: 0.562777
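The BaseLexer above is driven by per-mode scanner generators: a subclass registers regex patterns with `_compile_regexes`, maps mode names to generator methods in `_modes`, and `lex()` then dispatches to the current mode until the end of the string. Below is a minimal hypothetical subclass sketch; `NumberWordLexer` and its patterns are illustrative and not part of bibpy, and it assumes bibpy (with its funcparserlib dependency) is installed.

```python
# Illustrative sketch only; NumberWordLexer is not part of bibpy.
from bibpy.lexers.base_lexer import BaseLexer  # assumes bibpy is installed


class NumberWordLexer(BaseLexer):
    """Tiny lexer that recognises runs of digits, letters and whitespace."""

    def __init__(self):
        super().__init__()
        self.mode = 'normal'
        self._modes = {'normal': self.lex_normal}
        self._compile_regexes([
            ('number', (r'\d+', None)),      # no handler: default make_token
            ('name',   (r'[A-Za-z]+', None)),
            ('space',  (r'\s+', None)),
        ])

    def lex_normal(self):
        # 'match' anchors each pattern at the current scan position
        yield from self.scan(search_type='match')


for skipped, token in NumberWordLexer().lex('abc 123'):
    print(repr(skipped), token)  # e.g. '' Token('name', 'abc'), and so on
```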
hexsha: 6ed35db4d38694e7c783de4b97956c88c47c1f2f | size: 791 | ext: py | lang: Python
path: profiles_api/migrations/0002_profilefeeditem.py | repo: MichaelPGudz/Profiles_REST_API | head hexsha: 021521658fe6482aa3759e81c6ebb6246955194f | licenses: ["MIT"]
stars: 1 (2020-03-28T22:09:15.000Z to 2020-03-28T22:09:15.000Z) | issues: 7 (2020-03-29T20:14:47.000Z to 2022-02-10T14:44:44.000Z) | forks: null
content:
# Generated by Django 2.2.10 on 2020-04-01 19:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
avg_line_length: 31.64 | max_line_length: 126 | alphanum_fraction: 0.633375
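For reference, a model along the following lines would produce a migration like the one above. This is a hypothetical sketch: the row only contains the generated migration, not the app's models.py.

```python
# Hypothetical models.py sketch that would yield the ProfileFeedItem
# migration above; field names mirror the migration's columns.
from django.conf import settings
from django.db import models


class ProfileFeedItem(models.Model):
    """Status update tied to a user profile."""

    user_profile = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    status_text = models.CharField(max_length=255)
    created_on = models.DateTimeField(auto_now_add=True)
```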
hexsha: eed4809b8bdc97940cdb1788bdbe46e15f108964 | size: 2,604 | ext: py | lang: Python
path: code/get_meteo_estaciones_cat.py | repo: cesisar/TFM_UOC_2020 | head hexsha: a8bdcc3eb9c36c623d8396dbfa4b1eb567eaea97 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, argparse, os, logging
import pandas as pd
from datetime import datetime, timedelta
from sodapy import Socrata
def createLogger (logName = os.path.splitext(os.path.basename(__file__))[0], logLevel = logging.INFO):
FORMAT = '[%(asctime)-15s][%(levelname)s]: %(message)s'
logging.basicConfig(format=FORMAT,level=logLevel)
logger = logging.getLogger(logName)
return logger
logger = None #createLogger()
def run(argv):
global logger
date_format = lambda s: datetime.strptime(s, '%Y-%m-%d')
yesterday = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)-timedelta(days=1)
parser = argparse.ArgumentParser(description='Get meteo stations data for Catalonia.')
parser.add_argument('-o', '--oFolder', default=os.getcwd(), help='Output folder (default: current folder).')
parser.add_argument('-f', '--oFile', help='Output file (default: None). File is updated with new data.')
parser.add_argument('-d', '--debug', default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'], help='Logging level.')
#args = parser.parse_args(argv)
args, unknown = parser.parse_known_args()
oFolder = args.oFolder
oFile = args.oFile
lutLogLevel = {
"CRITICAL" : logging.CRITICAL,
"ERROR" : logging.ERROR,
"WARNING" : logging.WARNING,
"INFO" : logging.INFO,
"DEBUG" : logging.DEBUG,
"NOTSET" : logging.NOTSET }
logLevel = lutLogLevel[args.debug]
logger = createLogger()
logger.setLevel(logLevel)
get_meteo_estaciones_cat(oFolder, oFile)
exit(0)
def get_meteo_estaciones_cat(oFolder, oFile=None):
global logger
    # Metadata of automatic weather stations
# https://analisi.transparenciacatalunya.cat/es/Medi-Ambient/Metadades-estacions-meteorol-giques-autom-tiques/yqwd-vj5e
dataset_identifier = "yqwd-vj5e"
with Socrata("analisi.transparenciacatalunya.cat", None) as client:
filename = oFile if oFile else "meteo_stations.json"
filePath = os.path.join(oFolder, filename)
logger.info("Write to file: " + filePath)
with open(filePath, 'w') as f:
results = client.get(dataset_identifier)
try:
f.write(str(results))
except Exception as inst:
logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
logger.error(inst)
pass
if __name__ == '__main__':
logger = createLogger()
run(sys.argv[1:])
avg_line_length: 39.454545 | max_line_length: 148 | alphanum_fraction: 0.652458
hexsha: c07f0355bbc57fd188633a98190f5a878b737c92 | size: 460 | ext: py | lang: Python
path: chapter02/path_converter_demo/article/urls.py | repo: Tomtao626/django | head hexsha: fe945063593b4bfe82d74842f728b854b501a294 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/26 23:21
# @Author : Tom_tao
# @Site :
# @File : urls.py
# @Software: PyCharm
from . import views
from django.urls import re_path,path
urlpatterns = [
path('',views.article),
# re_path(r'list/(?P<categories>\w+|(\w+\+\w+)+)/',views.article_list)
path("list/<cate:categories>",views.article_list,name='list'),
path('detail/<int:article_id>/',views.article_detail,name='detail')
]
avg_line_length: 27.058824 | max_line_length: 74 | alphanum_fraction: 0.645652
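The `<cate:categories>` pattern above relies on a custom path converter registered under the name `cate`; the commented-out `re_path` shows the regex it replaces. A converter of roughly this shape, registered before `urlpatterns` is defined, would make the route work. The sketch is hypothetical, reconstructed from the commented regex rather than taken from the repo.

```python
# Hypothetical converter sketch; the regex mirrors the commented-out
# re_path above (one word, or several words joined with '+').
from django.urls import register_converter


class CategoryConverter:
    regex = r'\w+|(?:\w+\+\w+)+'

    def to_python(self, value):
        return value  # the view receives e.g. 'python' or 'python+django'

    def to_url(self, value):
        return value


register_converter(CategoryConverter, 'cate')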
hexsha: 220af7170a4cd352d0a0cfd12382a58ba1d892f0 | size: 577 | ext: py | lang: Python
path: tests/oldtests/old/dbmaptest.py | repo: ctb/pygr | head hexsha: a3a3e68073834c20ddbdb27ed746baf8c73fef0a | licenses: ["BSD-3-Clause"]
stars: 2 (2015-03-07T13:20:50.000Z to 2015-11-04T12:01:21.000Z) | issues: null | forks: null
content:
import MySQLdb
from pygr.seqdb import *
sp=BlastDB('sp') # OPEN SWISSPROT BLAST DB
s=Sequence(str(sp['CYGB_HUMAN'][40:-40]),'boo')
db=MySQLdb.Connection(db='test',read_default_file=os.environ['HOME']+'/.my.cnf')
cursor=db.cursor()
t=SQLTableMultiNoCache('test.mytable',cursor)
t._distinct_key='src_id'
m=StoredPathMapping(t,{'boo':s},sp)
for i in m[s].edges(): # SHOW ALL ALIGNMENTS TO s
print repr(i.srcPath),repr(i.destPath),i.blast_score
myg=sp['MYG_CHICK']
for i in m[s][myg]: # SHOW ALIGNMENT OF s AND myg
print repr(i.srcPath),repr(i.destPath),i.blast_score
avg_line_length: 28.85 | max_line_length: 80 | alphanum_fraction: 0.719237
hexsha: 1c91d733f6dda667ac2491e6020cefbc2d2c094a | size: 165 | ext: py | lang: Python
path: onep/commands/__init__.py | repo: apognu/1p | head hexsha: 4e3840d166e43d96300045363ad5f59e0a22c52c | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from . import delete
from . import document
from . import edit
from . import search
from . import share
from . import signin
from . import show
from . import vaults
avg_line_length: 18.333333 | max_line_length: 22 | alphanum_fraction: 0.757576
hexsha: 54eb74c3fc7feb26604ae7c639e0e3d29af7c7bb | size: 7,555 | ext: py | lang: Python
path: mistral/cmd/launch.py | repo: lijianying10/mistral | head hexsha: c08cbf241cecb9ffb58adbc60b3fe43727e9d5e0 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import eventlet
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=False if '--use-debugger' in sys.argv else True,
time=True)
import os
# If ../mistral/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'mistral', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from mistral.api import service as api_service
from mistral import config
from mistral.engine import engine_server
from mistral.event_engine import event_engine_server
from mistral.executors import executor_server
from mistral.notifiers import notification_server
from mistral.rpc import base as rpc
from mistral import version
CONF = cfg.CONF
SERVER_THREAD_MANAGER = None
SERVER_PROCESS_MANAGER = None
def launch_thread(server, workers=1):
try:
global SERVER_THREAD_MANAGER
if not SERVER_THREAD_MANAGER:
SERVER_THREAD_MANAGER = service.ServiceLauncher(CONF)
SERVER_THREAD_MANAGER.launch_service(server, workers=workers)
except Exception as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(1)
def launch_process(server, workers=1):
try:
global SERVER_PROCESS_MANAGER
if not SERVER_PROCESS_MANAGER:
SERVER_PROCESS_MANAGER = service.ProcessLauncher(CONF)
SERVER_PROCESS_MANAGER.launch_service(server, workers=workers)
except Exception as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(1)
def launch_executor():
launch_thread(executor_server.get_oslo_service())
def launch_engine():
launch_thread(engine_server.get_oslo_service())
def launch_event_engine():
launch_thread(event_engine_server.get_oslo_service())
def launch_notifier():
launch_thread(notification_server.get_oslo_service())
def launch_api():
server = api_service.WSGIService('mistral_api')
launch_process(server, workers=server.workers)
def launch_any(options):
for option in options:
LAUNCH_OPTIONS[option]()
global SERVER_PROCESS_MANAGER
global SERVER_THREAD_MANAGER
if SERVER_PROCESS_MANAGER:
SERVER_PROCESS_MANAGER.wait()
if SERVER_THREAD_MANAGER:
SERVER_THREAD_MANAGER.wait()
# Map cli options to appropriate functions. The cli options are
# registered in mistral's config.py.
LAUNCH_OPTIONS = {
'api': launch_api,
'engine': launch_engine,
'executor': launch_executor,
'event-engine': launch_event_engine,
'notifier': launch_notifier
}
MISTRAL_TITLE = """
|\\\ //| || ||
||\\\ //|| __ || __ __ ||
|| \\\// || || // |||||| || \\\ // \\\ ||
|| \\/ || \\\ || || || \\\ ||
|| || || \\\ || || || /\\\ ||
|| || || __// ||_// || \\\__// \\\_ ||
Mistral Workflow Service, version %s
""" % version.version_string()
def print_server_info():
print(MISTRAL_TITLE)
comp_str = ("[%s]" % ','.join(LAUNCH_OPTIONS
if cfg.CONF.server == ['all'] else cfg.CONF.server))
print('Launching server components %s...' % comp_str)
def get_properly_ordered_parameters():
"""Orders launch parameters in the right order.
In oslo it's important the order of the launch parameters.
if --config-file came after the command line parameters the command
line parameters are ignored.
So to make user command line parameters are never ignored this method
moves --config-file to be always first.
"""
args = sys.argv[1:]
for arg in sys.argv[1:]:
if arg == '--config-file' or arg.startswith('--config-file='):
if "=" in arg:
conf_file_value = arg.split("=", 1)[1]
else:
conf_file_value = args[args.index(arg) + 1]
args.remove(conf_file_value)
args.remove(arg)
args.insert(0, "--config-file")
args.insert(1, conf_file_value)
return args
def main():
try:
config.parse_args(get_properly_ordered_parameters())
print_server_info()
logging.setup(CONF, 'Mistral')
# Please refer to the oslo.messaging documentation for transport
# configuration. The default transport for oslo.messaging is
# rabbitMQ. The available transport drivers are listed in the
# setup.cfg file in oslo.messaging under the entry_points section for
# oslo.messaging.drivers. The transport driver is specified using the
# rpc_backend option in the default section of the oslo configuration
# file. The expected value for the rpc_backend is one of the key
# values available for the oslo.messaging.drivers (i.e. rabbit, fake).
# There are additional options such as ssl and credential that can be
# specified depending on the driver. Please refer to the driver
# implementation for those additional options. It's important to note
# that the "fake" transport should only be used if "all" the Mistral
# servers are launched on the same process. Otherwise, messages do not
# get delivered if the Mistral servers are launched on different
# processes because the "fake" transport is using an in process queue.
rpc.get_transport()
if cfg.CONF.server == ['all']:
# Launch all servers.
launch_any(LAUNCH_OPTIONS.keys())
else:
# Validate launch option.
if set(cfg.CONF.server) - set(LAUNCH_OPTIONS.keys()):
                raise Exception(
                    'Valid options are all or any combination of ' +
                    ', '.join(LAUNCH_OPTIONS.keys())
                )
# Launch distinct set of server(s).
launch_any(set(cfg.CONF.server))
except RuntimeError as excp:
sys.stderr.write("ERROR: %s\n" % excp)
sys.exit(1)
# Helper method used in unit tests to reset the service launchers.
def reset_server_managers():
global SERVER_THREAD_MANAGER
global SERVER_PROCESS_MANAGER
SERVER_THREAD_MANAGER = None
SERVER_PROCESS_MANAGER = None
# Helper method used in unit tests to access the service launcher.
def get_server_thread_manager():
global SERVER_THREAD_MANAGER
return SERVER_THREAD_MANAGER
# Helper method used in unit tests to access the process launcher.
def get_server_process_manager():
global SERVER_PROCESS_MANAGER
return SERVER_PROCESS_MANAGER
if __name__ == '__main__':
sys.exit(main())
avg_line_length: 31.219008 | max_line_length: 78 | alphanum_fraction: 0.663534
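The docstring of `get_properly_ordered_parameters()` explains why `--config-file` must come first: oslo.config otherwise ignores the command-line options that precede it. A standalone sketch of the same reordering, using a hypothetical argument list rather than mistral's real CLI, shows the effect:

```python
# Standalone illustration (not part of mistral) of the "--config-file first"
# reordering described in the docstring above.
def config_file_first(argv):
    args = list(argv)
    for arg in argv:
        if arg == '--config-file' or arg.startswith('--config-file='):
            if '=' in arg:
                value = arg.split('=', 1)[1]
            else:
                value = args[args.index(arg) + 1]
                args.remove(value)
            args.remove(arg)
            args.insert(0, '--config-file')
            args.insert(1, value)
    return args


print(config_file_first(['--server', 'api',
                         '--config-file', '/etc/mistral/mistral.conf']))
# -> ['--config-file', '/etc/mistral/mistral.conf', '--server', 'api']
```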
hexsha: 81bdbd9a044873dc59a68f6048c24a75afafbc30 | size: 3,078 | ext: py | lang: Python
path: main.py | repo: abadala/EnglishToIsiNdebele | head hexsha: fd69ea4e24710c5d28ad4c111e9b6b0611c083e1 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import sys
import json
import datetime
# increases a user's elder score with each contribution. Think through tiers.
def yongeza_ubudala():
return
# open a path for a user to provide an english word::and its translation
def translation_couresel():
# iterate over all words in word list
wordfile = open('words_dictionary.json', 'r') # switch to thesaurus
word_data = json.loads(wordfile.read())
translated_wordfile = open('translations.json', 'r')
prev_translations = []
current_translations = []
last_translation = '' # find where we left off last session
# find last translation index in word data
try:
# stuff
for word in word_data:
ndebele_translation = input(f'{word}: ')
if not ndebele_translation:
print(f'Skipping {word}')
continue
translation = {
'English': word,
'isiNdebeleTranslations': [
{
'translation': ndebele_translation.strip(),
'translationDateTime': str(datetime.datetime.now()), # convert to epoch millis
'translatedBy': 'Busani Sibusiso Qotho',
'upVotes': 0,
'downVotes': 0
}
]
}
print('Created translation: ', translation)
current_translations.append(translation)
except KeyboardInterrupt:
print('Process interrupted by user. Saving and exiting...')
# save and exit
except Exception as e:
print(e)
finally:
translations = prev_translations + current_translations
        with open('translations.json', 'w') as f:
f.write(json.dumps({
'version': '0.0.1',
'data': translations
}))
# return
def free_form_translation():
# enter english::isiNdebele. If word already exists but different translation,
# append translation to translations list
eng_nde_translation = input('nde::eng > ')
# validate input, never trust user
nde, eng = eng_nde_translation.strip().split('::')
translation = {
'English': eng,
'lexical_category': 'noun',
'isiNdebeleTranslations': [
{
'isiNdebeleTranslation': nde.strip(),
'translationDateTime': str(datetime.datetime.now()), # convert to epoch millis
'translatedBy': 'Busani Sibusiso Qotho',
'upVotes': 0,
'downVotes': 0
}
]
}
print('UNPERSISTED_TRANSLATION: ', translation)
# persist_translation(translation)
return
# find map of word metadata, e.g. verb/adj, singular/plural, synonyms, then let the translations
# inherit the english metadata.
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '--freeform':
while True:
free_form_translation()
else:
translation_couresel()
avg_line_length: 30.475248 | max_line_length: 102 | alphanum_fraction: 0.5705
hexsha: f3e2630d508e0b45487402ec7d4836ca439be24a | size: 12,096 | ext: py | lang: Python
path: pypxe/tftp.py | repo: vbyrd/PyPXE | head hexsha: 9d36ecf985d4436000ec266c5b8ed2e7cffa19bf | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
'''
This file contains classes and functions that implement the PyPXE TFTP service
'''
import socket
import struct
import os
import select
import time
import logging
import math
from pypxe import helpers
class ParentSocket(socket.socket):
'''Subclassed socket.socket to enable a link-back to the client object.'''
parent = None
class Client:
'''Client instance for TFTPD.'''
def __init__(self, mainsock, parent):
self.default_retries = parent.default_retries
self.timeout = parent.timeout
self.ip = parent.ip
self.message, self.address = mainsock.recvfrom(1024)
self.logger = helpers.get_child_logger(parent.logger, 'Client.{0}'.format(self.address))
self.netboot_directory = parent.netboot_directory
self.logger.debug('Receiving request...')
self.retries = self.default_retries
self.block = 1
self.blksize = 512
self.sent_time = float('inf')
self.dead = False
self.fh = None
self.filename = ''
self.wrap = 0
self.arm_wrap = False
self.handle() # message from the main socket
def ready(self):
'''Called when there is something to be read on our socket.'''
self.message = self.sock.recv(1024)
self.handle()
def send_block(self):
'''
Sends the next block of data, setting the timeout and retry
variables accordingly.
'''
data = None
try:
self.fh.seek(self.blksize * (self.block - 1))
data = self.fh.read(self.blksize)
except:
self.logger.error('Error while reading block {0}'.format(self.block))
self.dead = True
return
# opcode 3 == DATA, wraparound block number
response = struct.pack('!HH', 3, self.block % 65536)
response += data
self.sock.sendto(response, self.address)
self.logger.debug('Sending block {0}/{1}'.format(self.block, self.lastblock))
self.retries -= 1
self.sent_time = time.time()
def no_ack(self):
'''Determines if we timed out waiting for an ACK from the client.'''
if self.sent_time + self.timeout < time.time():
return True
return False
def no_retries(self):
'''Determines if the client ran out of retry attempts.'''
if not self.retries:
return True
return False
def valid_mode(self):
        '''Determines if the requested read mode is octet; if not, sends an error.'''
mode = self.message.split(b'\x00')[1]
if mode == b'octet': return True
self.send_error(5, 'Mode {0} not supported'.format(mode))
return False
def check_file(self):
'''
Determines if the file exists under the netboot_directory,
and if it is a file; if not, send an error.
'''
filename = self.message.split(b'\x00')[0].decode('ascii').lstrip('/')
try:
filename = helpers.normalize_path(self.netboot_directory, filename)
except helpers.PathTraversalException:
self.send_error(2, 'Path traversal error', filename = filename)
return False
if os.path.lexists(filename) and os.path.isfile(filename):
self.filename = filename
return True
self.send_error(1, 'File Not Found', filename = filename)
return False
def parse_options(self):
'''
Extracts the options sent from a client; if any, calculates the last
block based on the filesize and blocksize.
'''
options = self.message.split(b'\x00')[2: -1]
options = dict(zip((i.decode('ascii') for i in options[0::2]), map(int, options[1::2])))
self.changed_blksize = 'blksize' in options
if self.changed_blksize:
self.blksize = options['blksize']
self.lastblock = math.ceil(self.filesize / float(self.blksize))
self.tsize = True if 'tsize' in options else False
if self.filesize > (2 ** 16) * self.blksize:
self.logger.warning('Request too big, attempting transfer anyway.')
self.logger.debug('Details: Filesize {0} is too big for blksize {1}.'.format(self.filesize, self.blksize))
if len(options):
# we need to know later if we actually had any options
self.block = 0
return True
else:
return False
def reply_options(self):
'''Acknowledges any options received.'''
# only called if options, so send them all
response = struct.pack("!H", 6)
if self.changed_blksize:
response += b'blksize' + b'\x00'
            response += str(self.blksize).encode('ascii') + b'\x00'
if self.tsize:
response += b'tsize' + b'\x00'
response += str(self.filesize).encode('ascii') + b'\x00'
self.sock.sendto(response, self.address)
def new_request(self):
'''
When receiving a read request from the parent socket, open our
own socket and check the read request; if we don't have any options,
send the first block.
'''
self.sock = ParentSocket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, 0))
# used by select() to find ready clients
self.sock.parent = self
if not self.valid_mode() or not self.check_file():
# some clients just ACK the error (wrong code?)
# so forcefully shutdown
self.complete()
return
self.fh = open(self.filename, 'rb')
self.filesize = os.path.getsize(self.filename)
self.logger.info('File {0} ({1} bytes) requested'.format(self.filename, self.filesize))
if not self.parse_options():
# no options received so start transfer
if self.block == 1:
self.send_block()
return
self.reply_options() # we received some options so ACK those first
def send_error(self, code = 1, message = 'File Not Found', filename = ''):
'''
Sends an error code and string to a client. See RFC1350, page 10 for
details.
Value Meaning
===== =======
0 Not defined, see error message (if any).
1 File not found.
2 Access violation.
3 Disk full or allocation exceeded.
4 Illegal TFTP operation.
5 Unknown transfer ID.
6 File already exists.
7 No such user.
'''
response = struct.pack('!H', 5) # error opcode
response += struct.pack('!H', code) # error code
response += message.encode('ascii')
response += b'\x00'
self.sock.sendto(response, self.address)
self.logger.info('Sending {0}: {1} {2}'.format(code, message, filename))
def complete(self):
'''
Closes a file and socket after sending it
and marks ourselves as dead to be cleaned up.
'''
try:
self.fh.close()
except AttributeError:
pass # we have not opened yet or file-not-found
self.sock.close()
self.dead = True
def handle(self):
        '''Takes the message from the parent socket and acts accordingly.'''
# if addr not in ongoing, call this, else ready()
[opcode] = struct.unpack('!H', self.message[:2])
if opcode == 1:
self.message = self.message[2:]
self.new_request()
elif opcode == 4:
[block] = struct.unpack('!H', self.message[2:4])
if block == 0 and self.arm_wrap:
self.wrap += 1
self.arm_wrap = False
if block == 32768:
self.arm_wrap = True
if block < self.block % 65536:
self.logger.warning('Ignoring duplicated ACK received for block {0}'.format(self.block))
elif block > self.block % 65536:
self.logger.warning('Ignoring out of sequence ACK received for block {0}'.format(self.block))
elif block + self.wrap * 65536 == self.lastblock:
if self.filesize % self.blksize == 0:
self.block = block + 1
self.send_block()
self.logger.info('Completed sending {0}'.format(self.filename))
self.complete()
else:
self.block = block + 1
self.retries = self.default_retries
self.send_block()
elif opcode == 2:
# write request
self.sock = ParentSocket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, 0))
# used by select() to find ready clients
self.sock.parent = self
# send error
self.send_error(4, 'Write support not implemented')
self.dead = True
class TFTPD:
'''
This class implements a read-only TFTP server
implemented from RFC1350 and RFC2348
'''
def __init__(self, **server_settings):
self.ip = server_settings.get('ip', '0.0.0.0')
self.port = int(server_settings.get('port', 69))
self.netboot_directory = server_settings.get('netboot_directory', '.')
self.mode_verbose = server_settings.get('mode_verbose', False) # verbose mode
self.mode_debug = server_settings.get('mode_debug', False) # debug mode
self.logger = server_settings.get('logger', None)
self.default_retries = server_settings.get('default_retries', 3)
self.timeout = server_settings.get('timeout', 5)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, self.port))
# setup logger
if self.logger == None:
self.logger = logging.getLogger('TFTP')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
if self.mode_debug:
self.logger.setLevel(logging.DEBUG)
elif self.mode_verbose:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.WARN)
self.logger.debug('NOTICE: TFTP server started in debug mode. TFTP server is using the following:')
self.logger.debug('Server IP: {0}'.format(self.ip))
self.logger.debug('Server Port: {0}'.format(self.port))
self.logger.debug('Network Boot Directory: {0}'.format(self.netboot_directory))
self.ongoing = []
def listen(self):
'''This method listens for incoming requests.'''
while True:
            # remove complete clients so select doesn't fail
for client in self.ongoing:
if client.dead:
self.ongoing.remove(client)
rlist, _, _ = select.select([self.sock] + [client.sock for client in self.ongoing if not client.dead], [], [], 1)
for sock in rlist:
if sock == self.sock:
# main socket, so new client
self.ongoing.append(Client(sock, self))
else:
# client socket, so tell the client object it's ready
sock.parent.ready()
# if we haven't received an ACK in timeout time, retry
[client.send_block() for client in self.ongoing if client.no_ack()]
# if we have run out of retries, kill the client
for client in self.ongoing:
if client.no_retries():
client.logger.info('Timeout while sending {0}'.format(client.filename))
client.complete()
avg_line_length: 40.32 | max_line_length: 125 | alphanum_fraction: 0.579613
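TFTPD above is configured through keyword settings and then runs its own select() loop. A minimal usage sketch follows; the directory and settings are illustrative, it assumes the file is importable as pypxe.tftp (as its repo path suggests), and binding port 69 normally requires elevated privileges.

```python
# Illustrative only: serve files read-only over TFTP from /srv/tftp.
from pypxe import tftp  # assumes PyPXE is installed

server = tftp.TFTPD(
    ip='0.0.0.0',
    port=69,                       # privileged port; run as root or remap
    netboot_directory='/srv/tftp',
    mode_verbose=True,             # INFO-level logging
)
server.listen()                    # blocks, handling clients via select()
```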
hexsha: d71fbde751e280a87b527f2eb57356cd1ad49813 | size: 28,491 | ext: py | lang: Python
path: pyglet/gl/agl.py | repo: SwineProject/pyglet | head hexsha: f0203870bef94d4349ad16f060c941d45270a0b5 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2018 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /System/Library/Frameworks/AGL.framework/Headers/agl.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: gengl.py 601 2007-02-04 05:36:59Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_AGL as _link_function
if not _link_function:
raise ImportError('AGL framework is not available.')
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /System/Library/Frameworks/AGL.framework/Headers/agl.h
AGL_VERSION_2_0 = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:41
class struct_GDevice(Structure):
__slots__ = [
]
struct_GDevice._fields_ = [
('_opaque_struct', c_int)
]
GDevice = struct_GDevice # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1347
GDPtr = POINTER(GDevice) # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1348
GDHandle = POINTER(GDPtr) # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1349
AGLDevice = GDHandle # /System/Library/Frameworks/AGL.framework/Headers/agl.h:46
class struct_OpaqueGrafPtr(Structure):
__slots__ = [
]
struct_OpaqueGrafPtr._fields_ = [
('_opaque_struct', c_int)
]
GrafPtr = POINTER(struct_OpaqueGrafPtr) # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1009
CGrafPtr = GrafPtr # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1392
AGLDrawable = CGrafPtr # /System/Library/Frameworks/AGL.framework/Headers/agl.h:51
class struct___AGLRendererInfoRec(Structure):
__slots__ = [
]
struct___AGLRendererInfoRec._fields_ = [
('_opaque_struct', c_int)
]
AGLRendererInfo = POINTER(struct___AGLRendererInfoRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:56
class struct___AGLPixelFormatRec(Structure):
__slots__ = [
]
struct___AGLPixelFormatRec._fields_ = [
('_opaque_struct', c_int)
]
AGLPixelFormat = POINTER(struct___AGLPixelFormatRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:57
class struct___AGLContextRec(Structure):
__slots__ = [
]
struct___AGLContextRec._fields_ = [
('_opaque_struct', c_int)
]
AGLContext = POINTER(struct___AGLContextRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:58
class struct___AGLPBufferRec(Structure):
__slots__ = [
]
struct___AGLPBufferRec._fields_ = [
('_opaque_struct', c_int)
]
AGLPbuffer = POINTER(struct___AGLPBufferRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:59
AGL_NONE = 0 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:66
AGL_ALL_RENDERERS = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:67
AGL_BUFFER_SIZE = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:68
AGL_LEVEL = 3 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:69
AGL_RGBA = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:70
AGL_DOUBLEBUFFER = 5 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:71
AGL_STEREO = 6 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:72
AGL_AUX_BUFFERS = 7 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:73
AGL_RED_SIZE = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:74
AGL_GREEN_SIZE = 9 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:75
AGL_BLUE_SIZE = 10 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:76
AGL_ALPHA_SIZE = 11 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:77
AGL_DEPTH_SIZE = 12 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:78
AGL_STENCIL_SIZE = 13 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:79
AGL_ACCUM_RED_SIZE = 14 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:80
AGL_ACCUM_GREEN_SIZE = 15 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:81
AGL_ACCUM_BLUE_SIZE = 16 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:82
AGL_ACCUM_ALPHA_SIZE = 17 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:83
AGL_PIXEL_SIZE = 50 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:88
AGL_MINIMUM_POLICY = 51 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:89
AGL_MAXIMUM_POLICY = 52 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:90
AGL_OFFSCREEN = 53 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:91
AGL_FULLSCREEN = 54 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:92
AGL_SAMPLE_BUFFERS_ARB = 55 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:93
AGL_SAMPLES_ARB = 56 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:94
AGL_AUX_DEPTH_STENCIL = 57 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:95
AGL_COLOR_FLOAT = 58 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:96
AGL_MULTISAMPLE = 59 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:97
AGL_SUPERSAMPLE = 60 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:98
AGL_SAMPLE_ALPHA = 61 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:99
AGL_RENDERER_ID = 70 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:104
AGL_SINGLE_RENDERER = 71 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:105
AGL_NO_RECOVERY = 72 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:106
AGL_ACCELERATED = 73 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:107
AGL_CLOSEST_POLICY = 74 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:108
AGL_ROBUST = 75 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:109
AGL_BACKING_STORE = 76 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:110
AGL_MP_SAFE = 78 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:111
AGL_WINDOW = 80 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:113
AGL_MULTISCREEN = 81 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:114
AGL_VIRTUAL_SCREEN = 82 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:115
AGL_COMPLIANT = 83 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:116
AGL_PBUFFER = 90 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:118
AGL_BUFFER_MODES = 100 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:135
AGL_MIN_LEVEL = 101 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:136
AGL_MAX_LEVEL = 102 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:137
AGL_COLOR_MODES = 103 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:138
AGL_ACCUM_MODES = 104 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:139
AGL_DEPTH_MODES = 105 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:140
AGL_STENCIL_MODES = 106 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:141
AGL_MAX_AUX_BUFFERS = 107 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:142
AGL_VIDEO_MEMORY = 120 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:143
AGL_TEXTURE_MEMORY = 121 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:144
AGL_RENDERER_COUNT = 128 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:145
AGL_SWAP_RECT = 200 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:150
AGL_BUFFER_RECT = 202 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:151
AGL_SWAP_LIMIT = 203 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:152
AGL_COLORMAP_TRACKING = 210 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:153
AGL_COLORMAP_ENTRY = 212 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:154
AGL_RASTERIZATION = 220 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:155
AGL_SWAP_INTERVAL = 222 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:156
AGL_STATE_VALIDATION = 230 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:157
AGL_BUFFER_NAME = 231 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:158
AGL_ORDER_CONTEXT_TO_FRONT = 232 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:159
AGL_CONTEXT_SURFACE_ID = 233 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:160
AGL_CONTEXT_DISPLAY_ID = 234 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:161
AGL_SURFACE_ORDER = 235 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:162
AGL_SURFACE_OPACITY = 236 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:163
AGL_CLIP_REGION = 254 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:164
AGL_FS_CAPTURE_SINGLE = 255 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:165
AGL_SURFACE_BACKING_SIZE = 304 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:166
AGL_ENABLE_SURFACE_BACKING_SIZE = 305 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:167
AGL_SURFACE_VOLATILE = 306 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:168
AGL_FORMAT_CACHE_SIZE = 501 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:172
AGL_CLEAR_FORMAT_CACHE = 502 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:173
AGL_RETAIN_RENDERERS = 503 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:174
AGL_MONOSCOPIC_BIT = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:177
AGL_STEREOSCOPIC_BIT = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:178
AGL_SINGLEBUFFER_BIT = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:179
AGL_DOUBLEBUFFER_BIT = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:180
AGL_0_BIT = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:183
AGL_1_BIT = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:184
AGL_2_BIT = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:185
AGL_3_BIT = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:186
AGL_4_BIT = 16 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:187
AGL_5_BIT = 32 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:188
AGL_6_BIT = 64 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:189
AGL_8_BIT = 128 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:190
AGL_10_BIT = 256 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:191
AGL_12_BIT = 512 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:192
AGL_16_BIT = 1024 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:193
AGL_24_BIT = 2048 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:194
AGL_32_BIT = 4096 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:195
AGL_48_BIT = 8192 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:196
AGL_64_BIT = 16384 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:197
AGL_96_BIT = 32768 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:198
AGL_128_BIT = 65536 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:199
AGL_RGB8_BIT = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:202
AGL_RGB8_A8_BIT = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:203
AGL_BGR233_BIT = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:204
AGL_BGR233_A8_BIT = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:205
AGL_RGB332_BIT = 16 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:206
AGL_RGB332_A8_BIT = 32 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:207
AGL_RGB444_BIT = 64 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:208
AGL_ARGB4444_BIT = 128 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:209
AGL_RGB444_A8_BIT = 256 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:210
AGL_RGB555_BIT = 512 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:211
AGL_ARGB1555_BIT = 1024 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:212
AGL_RGB555_A8_BIT = 2048 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:213
AGL_RGB565_BIT = 4096 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:214
AGL_RGB565_A8_BIT = 8192 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:215
AGL_RGB888_BIT = 16384 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:216
AGL_ARGB8888_BIT = 32768 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:217
AGL_RGB888_A8_BIT = 65536 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:218
AGL_RGB101010_BIT = 131072 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:219
AGL_ARGB2101010_BIT = 262144 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:220
AGL_RGB101010_A8_BIT = 524288 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:221
AGL_RGB121212_BIT = 1048576 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:222
AGL_ARGB12121212_BIT = 2097152 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:223
AGL_RGB161616_BIT = 4194304 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:224
AGL_ARGB16161616_BIT = 8388608 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:225
AGL_INDEX8_BIT = 536870912 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:226
AGL_INDEX16_BIT = 1073741824 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:227
AGL_RGBFLOAT64_BIT = 16777216 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:228
AGL_RGBAFLOAT64_BIT = 33554432 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:229
AGL_RGBFLOAT128_BIT = 67108864 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:230
AGL_RGBAFLOAT128_BIT = 134217728 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:231
AGL_RGBFLOAT256_BIT = 268435456 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:232
AGL_RGBAFLOAT256_BIT = 536870912 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:233
AGL_NO_ERROR = 0 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:238
AGL_BAD_ATTRIBUTE = 10000 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:240
AGL_BAD_PROPERTY = 10001 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:241
AGL_BAD_PIXELFMT = 10002 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:242
AGL_BAD_RENDINFO = 10003 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:243
AGL_BAD_CONTEXT = 10004 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:244
AGL_BAD_DRAWABLE = 10005 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:245
AGL_BAD_GDEV = 10006 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:246
AGL_BAD_STATE = 10007 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:247
AGL_BAD_VALUE = 10008 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:248
AGL_BAD_MATCH = 10009 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:249
AGL_BAD_ENUM = 10010 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:250
AGL_BAD_OFFSCREEN = 10011 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:251
AGL_BAD_FULLSCREEN = 10012 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:252
AGL_BAD_WINDOW = 10013 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:253
AGL_BAD_POINTER = 10014 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:254
AGL_BAD_MODULE = 10015 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:255
AGL_BAD_ALLOC = 10016 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:256
AGL_BAD_CONNECTION = 10017 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:257
GLint = c_long # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:47
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:264
aglChoosePixelFormat = _link_function('aglChoosePixelFormat', AGLPixelFormat, [POINTER(AGLDevice), GLint, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:265
aglDestroyPixelFormat = _link_function('aglDestroyPixelFormat', None, [AGLPixelFormat], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:266
aglNextPixelFormat = _link_function('aglNextPixelFormat', AGLPixelFormat, [AGLPixelFormat], None)
GLboolean = c_ubyte # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:43
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:267
aglDescribePixelFormat = _link_function('aglDescribePixelFormat', GLboolean, [AGLPixelFormat, GLint, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:268
aglDevicesOfPixelFormat = _link_function('aglDevicesOfPixelFormat', POINTER(AGLDevice), [AGLPixelFormat, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:273
aglQueryRendererInfo = _link_function('aglQueryRendererInfo', AGLRendererInfo, [POINTER(AGLDevice), GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:274
aglDestroyRendererInfo = _link_function('aglDestroyRendererInfo', None, [AGLRendererInfo], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:275
aglNextRendererInfo = _link_function('aglNextRendererInfo', AGLRendererInfo, [AGLRendererInfo], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:276
aglDescribeRenderer = _link_function('aglDescribeRenderer', GLboolean, [AGLRendererInfo, GLint, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:281
aglCreateContext = _link_function('aglCreateContext', AGLContext, [AGLPixelFormat, AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:282
aglDestroyContext = _link_function('aglDestroyContext', GLboolean, [AGLContext], None)
GLuint = c_ulong # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:51
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:283
aglCopyContext = _link_function('aglCopyContext', GLboolean, [AGLContext, AGLContext, GLuint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:284
aglUpdateContext = _link_function('aglUpdateContext', GLboolean, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:289
aglSetCurrentContext = _link_function('aglSetCurrentContext', GLboolean, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:290
aglGetCurrentContext = _link_function('aglGetCurrentContext', AGLContext, [], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:295
aglSetDrawable = _link_function('aglSetDrawable', GLboolean, [AGLContext, AGLDrawable], None)
GLsizei = c_long # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:48
GLvoid = None # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:56
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:296
aglSetOffScreen = _link_function('aglSetOffScreen', GLboolean, [AGLContext, GLsizei, GLsizei, GLsizei, POINTER(GLvoid)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:297
aglSetFullScreen = _link_function('aglSetFullScreen', GLboolean, [AGLContext, GLsizei, GLsizei, GLsizei, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:298
aglGetDrawable = _link_function('aglGetDrawable', AGLDrawable, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:303
aglSetVirtualScreen = _link_function('aglSetVirtualScreen', GLboolean, [AGLContext, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:304
aglGetVirtualScreen = _link_function('aglGetVirtualScreen', GLint, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:309
aglGetVersion = _link_function('aglGetVersion', None, [POINTER(GLint), POINTER(GLint)], None)
GLenum = c_ulong # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:42
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:314
aglConfigure = _link_function('aglConfigure', GLboolean, [GLenum, GLuint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:319
aglSwapBuffers = _link_function('aglSwapBuffers', None, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:324
aglEnable = _link_function('aglEnable', GLboolean, [AGLContext, GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:325
aglDisable = _link_function('aglDisable', GLboolean, [AGLContext, GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:326
aglIsEnabled = _link_function('aglIsEnabled', GLboolean, [AGLContext, GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:327
aglSetInteger = _link_function('aglSetInteger', GLboolean, [AGLContext, GLenum, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:328
aglGetInteger = _link_function('aglGetInteger', GLboolean, [AGLContext, GLenum, POINTER(GLint)], None)
Style = c_ubyte # /System/Library/Frameworks/CoreServices.framework/Headers/../Frameworks/CarbonCore.framework/Headers/MacTypes.h:524
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:333
aglUseFont = _link_function('aglUseFont', GLboolean, [AGLContext, GLint, Style, GLint, GLint, GLint, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:338
aglGetError = _link_function('aglGetError', GLenum, [], None)
GLubyte = c_ubyte # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:49
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:339
aglErrorString = _link_function('aglErrorString', POINTER(GLubyte), [GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:344
aglResetLibrary = _link_function('aglResetLibrary', None, [], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:349
aglSurfaceTexture = _link_function('aglSurfaceTexture', None, [AGLContext, GLenum, GLenum, AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:354
aglCreatePBuffer = _link_function('aglCreatePBuffer', GLboolean, [GLint, GLint, GLenum, GLenum, c_long, POINTER(AGLPbuffer)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:355
aglDestroyPBuffer = _link_function('aglDestroyPBuffer', GLboolean, [AGLPbuffer], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:356
aglDescribePBuffer = _link_function('aglDescribePBuffer', GLboolean, [AGLPbuffer, POINTER(GLint), POINTER(GLint), POINTER(GLenum), POINTER(GLenum), POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:357
aglTexImagePBuffer = _link_function('aglTexImagePBuffer', GLboolean, [AGLContext, AGLPbuffer, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:362
aglSetPBuffer = _link_function('aglSetPBuffer', GLboolean, [AGLContext, AGLPbuffer, GLint, GLint, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:363
aglGetPBuffer = _link_function('aglGetPBuffer', GLboolean, [AGLContext, POINTER(AGLPbuffer), POINTER(GLint), POINTER(GLint), POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:368
aglGetCGLContext = _link_function('aglGetCGLContext', GLboolean, [AGLContext, POINTER(POINTER(None))], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:369
aglGetCGLPixelFormat = _link_function('aglGetCGLPixelFormat', GLboolean, [AGLPixelFormat, POINTER(POINTER(None))], None)
__all__ = ['AGL_VERSION_2_0', 'AGLDevice', 'AGLDrawable', 'AGLRendererInfo',
'AGLPixelFormat', 'AGLContext', 'AGLPbuffer', 'AGL_NONE', 'AGL_ALL_RENDERERS',
'AGL_BUFFER_SIZE', 'AGL_LEVEL', 'AGL_RGBA', 'AGL_DOUBLEBUFFER', 'AGL_STEREO',
'AGL_AUX_BUFFERS', 'AGL_RED_SIZE', 'AGL_GREEN_SIZE', 'AGL_BLUE_SIZE',
'AGL_ALPHA_SIZE', 'AGL_DEPTH_SIZE', 'AGL_STENCIL_SIZE', 'AGL_ACCUM_RED_SIZE',
'AGL_ACCUM_GREEN_SIZE', 'AGL_ACCUM_BLUE_SIZE', 'AGL_ACCUM_ALPHA_SIZE',
'AGL_PIXEL_SIZE', 'AGL_MINIMUM_POLICY', 'AGL_MAXIMUM_POLICY', 'AGL_OFFSCREEN',
'AGL_FULLSCREEN', 'AGL_SAMPLE_BUFFERS_ARB', 'AGL_SAMPLES_ARB',
'AGL_AUX_DEPTH_STENCIL', 'AGL_COLOR_FLOAT', 'AGL_MULTISAMPLE',
'AGL_SUPERSAMPLE', 'AGL_SAMPLE_ALPHA', 'AGL_RENDERER_ID',
'AGL_SINGLE_RENDERER', 'AGL_NO_RECOVERY', 'AGL_ACCELERATED',
'AGL_CLOSEST_POLICY', 'AGL_ROBUST', 'AGL_BACKING_STORE', 'AGL_MP_SAFE',
'AGL_WINDOW', 'AGL_MULTISCREEN', 'AGL_VIRTUAL_SCREEN', 'AGL_COMPLIANT',
'AGL_PBUFFER', 'AGL_BUFFER_MODES', 'AGL_MIN_LEVEL', 'AGL_MAX_LEVEL',
'AGL_COLOR_MODES', 'AGL_ACCUM_MODES', 'AGL_DEPTH_MODES', 'AGL_STENCIL_MODES',
'AGL_MAX_AUX_BUFFERS', 'AGL_VIDEO_MEMORY', 'AGL_TEXTURE_MEMORY',
'AGL_RENDERER_COUNT', 'AGL_SWAP_RECT', 'AGL_BUFFER_RECT', 'AGL_SWAP_LIMIT',
'AGL_COLORMAP_TRACKING', 'AGL_COLORMAP_ENTRY', 'AGL_RASTERIZATION',
'AGL_SWAP_INTERVAL', 'AGL_STATE_VALIDATION', 'AGL_BUFFER_NAME',
'AGL_ORDER_CONTEXT_TO_FRONT', 'AGL_CONTEXT_SURFACE_ID',
'AGL_CONTEXT_DISPLAY_ID', 'AGL_SURFACE_ORDER', 'AGL_SURFACE_OPACITY',
'AGL_CLIP_REGION', 'AGL_FS_CAPTURE_SINGLE', 'AGL_SURFACE_BACKING_SIZE',
'AGL_ENABLE_SURFACE_BACKING_SIZE', 'AGL_SURFACE_VOLATILE',
'AGL_FORMAT_CACHE_SIZE', 'AGL_CLEAR_FORMAT_CACHE', 'AGL_RETAIN_RENDERERS',
'AGL_MONOSCOPIC_BIT', 'AGL_STEREOSCOPIC_BIT', 'AGL_SINGLEBUFFER_BIT',
'AGL_DOUBLEBUFFER_BIT', 'AGL_0_BIT', 'AGL_1_BIT', 'AGL_2_BIT', 'AGL_3_BIT',
'AGL_4_BIT', 'AGL_5_BIT', 'AGL_6_BIT', 'AGL_8_BIT', 'AGL_10_BIT',
'AGL_12_BIT', 'AGL_16_BIT', 'AGL_24_BIT', 'AGL_32_BIT', 'AGL_48_BIT',
'AGL_64_BIT', 'AGL_96_BIT', 'AGL_128_BIT', 'AGL_RGB8_BIT', 'AGL_RGB8_A8_BIT',
'AGL_BGR233_BIT', 'AGL_BGR233_A8_BIT', 'AGL_RGB332_BIT', 'AGL_RGB332_A8_BIT',
'AGL_RGB444_BIT', 'AGL_ARGB4444_BIT', 'AGL_RGB444_A8_BIT', 'AGL_RGB555_BIT',
'AGL_ARGB1555_BIT', 'AGL_RGB555_A8_BIT', 'AGL_RGB565_BIT',
'AGL_RGB565_A8_BIT', 'AGL_RGB888_BIT', 'AGL_ARGB8888_BIT',
'AGL_RGB888_A8_BIT', 'AGL_RGB101010_BIT', 'AGL_ARGB2101010_BIT',
'AGL_RGB101010_A8_BIT', 'AGL_RGB121212_BIT', 'AGL_ARGB12121212_BIT',
'AGL_RGB161616_BIT', 'AGL_ARGB16161616_BIT', 'AGL_INDEX8_BIT',
'AGL_INDEX16_BIT', 'AGL_RGBFLOAT64_BIT', 'AGL_RGBAFLOAT64_BIT',
'AGL_RGBFLOAT128_BIT', 'AGL_RGBAFLOAT128_BIT', 'AGL_RGBFLOAT256_BIT',
'AGL_RGBAFLOAT256_BIT', 'AGL_NO_ERROR', 'AGL_BAD_ATTRIBUTE',
'AGL_BAD_PROPERTY', 'AGL_BAD_PIXELFMT', 'AGL_BAD_RENDINFO', 'AGL_BAD_CONTEXT',
'AGL_BAD_DRAWABLE', 'AGL_BAD_GDEV', 'AGL_BAD_STATE', 'AGL_BAD_VALUE',
'AGL_BAD_MATCH', 'AGL_BAD_ENUM', 'AGL_BAD_OFFSCREEN', 'AGL_BAD_FULLSCREEN',
'AGL_BAD_WINDOW', 'AGL_BAD_POINTER', 'AGL_BAD_MODULE', 'AGL_BAD_ALLOC',
'AGL_BAD_CONNECTION', 'aglChoosePixelFormat', 'aglDestroyPixelFormat',
'aglNextPixelFormat', 'aglDescribePixelFormat', 'aglDevicesOfPixelFormat',
'aglQueryRendererInfo', 'aglDestroyRendererInfo', 'aglNextRendererInfo',
'aglDescribeRenderer', 'aglCreateContext', 'aglDestroyContext',
'aglCopyContext', 'aglUpdateContext', 'aglSetCurrentContext',
'aglGetCurrentContext', 'aglSetDrawable', 'aglSetOffScreen',
'aglSetFullScreen', 'aglGetDrawable', 'aglSetVirtualScreen',
'aglGetVirtualScreen', 'aglGetVersion', 'aglConfigure', 'aglSwapBuffers',
'aglEnable', 'aglDisable', 'aglIsEnabled', 'aglSetInteger', 'aglGetInteger',
'aglUseFont', 'aglGetError', 'aglErrorString', 'aglResetLibrary',
'aglSurfaceTexture', 'aglCreatePBuffer', 'aglDestroyPBuffer',
'aglDescribePBuffer', 'aglTexImagePBuffer', 'aglSetPBuffer', 'aglGetPBuffer',
'aglGetCGLContext', 'aglGetCGLPixelFormat']
# END GENERATED CONTENT (do not edit above this line)
| 63.172949
| 170
| 0.790214
|
f07f5770146f1dc6e16f4f09f8f57d7ec0a27079
| 3,267
|
py
|
Python
|
qa/rpc-tests/zmq_test.py
|
fujicoin/fujicoin-bitcore
|
bd4219c284e716c2326ba450cc3288ca691cd8b3
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/zmq_test.py
|
fujicoin/fujicoin-bitcore
|
bd4219c284e716c2326ba450cc3288ca691cd8b3
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/zmq_test.py
|
fujicoin/fujicoin-bitcore
|
bd4219c284e716c2326ba450cc3288ca691cd8b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import *
import zmq
import struct
import http.client
import urllib.parse
class ZMQTest (FujicoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
        self.port = 28332
def setup_nodes(self):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
def run_test(self):
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
print("listen...")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
nseq = msg[2]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
for x in range(0,n*2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
blockcount += 1
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
        assert_equal(hashRPC, hashZMQ) #txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest ().main ()
| 32.346535
| 121
| 0.609428
|
8cc85cae55ac2729cb7c637c4cd110cd9932b049
| 9,038
|
py
|
Python
|
src/arclus/preprocessing/dataset_reader.py
|
fromm-m/ecir2021-am-search
|
58ed5fb86b8b3184fbbc4777c9e038768b728d13
|
[
"MIT"
] | 3
|
2021-02-15T19:38:41.000Z
|
2021-02-16T13:51:04.000Z
|
src/arclus/preprocessing/dataset_reader.py
|
fromm-m/ecir2021-am-search
|
58ed5fb86b8b3184fbbc4777c9e038768b728d13
|
[
"MIT"
] | null | null | null |
src/arclus/preprocessing/dataset_reader.py
|
fromm-m/ecir2021-am-search
|
58ed5fb86b8b3184fbbc4777c9e038768b728d13
|
[
"MIT"
] | 1
|
2021-02-16T13:52:08.000Z
|
2021-02-16T13:52:08.000Z
|
import json
import logging
import pathlib
from typing import Union, Optional
import pandas
from arclus.utils import is_blank
class DatasetReader:
"""General class for reading datasets."""
#: The root directory.
root: pathlib.Path
def __init__(
self,
root: Union[pathlib.Path, str],
claim_id: int = 0,
premise_id: int = 0,
):
if not isinstance(root, pathlib.Path):
root = pathlib.Path(root)
self.root = root
self.offset_claim_id = claim_id
self.offset_premise_id = premise_id
self._claims = []
self._premises = []
self._claim_premises = []
self._read()
def _add_claim(self, text: str) -> Optional[int]:
"""
Add a claim to the dataset.
:param text:
The claim text.
:return:
The claim ID.
"""
if is_blank(text=text):
return None
claim_id = len(self._claims) + self.offset_claim_id
self._claims.append(dict(
claim_text=text,
claim_id=claim_id,
source=self.name,
))
return claim_id
def _add_premise(self, text: str, stance: str) -> Optional[int]:
"""
Add a premise to the dataset.
:param text:
The premise text.
:return:
The premise ID.
"""
if is_blank(text=text):
return None
premise_id = len(self._premises) + self.offset_premise_id
self._premises.append(dict(
premise_text=text,
premise_id=premise_id,
stance=stance,
source=self.name,
))
return premise_id
def _add_claim_premise(self, premise_id: Optional[int], claim_id: Optional[int]) -> None:
"""
Add a link between premise_id and claim_id.
If any of the IDs is None, no link is added.
"""
if None not in {claim_id, premise_id}:
self._claim_premises.append(dict(
premise_id=premise_id,
claim_id=claim_id,
))
def __str__(self):
return f'Dataset(name={self.name}, num_claims={len(self._claims)}, num_premises={len(self._premises)})'
@property
def max_claim_id(self) -> int:
return self.offset_claim_id + len(self._claims)
@property
def max_premise_id(self) -> int:
return self.offset_premise_id + len(self._premises)
@property
def premises(self) -> pandas.DataFrame:
return pandas.DataFrame(self._premises)
@property
def claims(self) -> pandas.DataFrame:
return pandas.DataFrame(self._claims)
@property
def claims_premises(self) -> pandas.DataFrame:
return pandas.DataFrame(self._claim_premises)
@property
def name(self) -> str:
"""The name of the dataset."""
raise NotImplementedError
def _read(self):
"""Read the dataset."""
raise NotImplementedError
class DebatePediaOrgReader(DatasetReader):
"""DebatePediaOrg dataset."""
@property
def name(self) -> str:
return 'DebatePediaOrg'
def _read(self):
for index, file_path in enumerate(self.root.iterdir()):
with file_path.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
for claim_data in data:
if len(claim_data['pros']) + len(claim_data['cons']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = claim_data['claimText']
claim_id = self._add_claim(text=claim_text)
for premise_data, stance in [(claim, stance) for stance in ('Pro', 'Con') for claim in
claim_data[stance.lower() + 's']]:
premise_text = premise_data['premiseText']
premise_id = self._add_premise(text=premise_text, stance=stance)
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
class DebateOrgReader(DatasetReader):
"""DebateOrg dataset."""
@property
def name(self) -> str:
return 'DebateOrg'
def _read(self):
with self.root.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
for claim_data in data:
if len(claim_data['pros']) + len(claim_data['cons']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = claim_data['title']
claim_id = self._add_claim(claim_text)
for premise_data, stance in [(claim, stance) for stance in ('Pro', 'Con') for claim in
claim_data[stance.lower() + 's']]:
premise_text = premise_data['text']
premise_id = self._add_premise(text=premise_text, stance=stance)
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
class DebateWiseReader(DatasetReader):
"""DebateWise dataset."""
@property
def name(self) -> str:
return 'debatewise'
def _read(self):
for index, file_path in enumerate(self.root.iterdir()):
with file_path.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
if len(data['ArgumentList']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = data['MetaData']['Title']
claim_id = self._add_claim(text=claim_text)
for premise_data in data['ArgumentList']:
premise_text = premise_data['Argument']['Premise'][0]
premise_id = self._add_premise(text=premise_text,
stance=premise_data['Argument']['PremiseStance'][0])
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
class IDebateOrgReader(DatasetReader):
"""iDebateOrg dataset."""
@property
def name(self) -> str:
return 'iDebateOrg'
def _read(self):
for index, file_path in enumerate(self.root.iterdir()):
with file_path.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
if len(data['pros']) + len(data['cons']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = data['title']
claim_id = self._add_claim(claim_text)
for premise_data_pro in (data['pros']):
premise_text = premise_data_pro['text point pro claim']
premise_id = self._add_premise(text=premise_text, stance="Pro")
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
for premise_data_con in (data['cons']):
premise_text = premise_data_con['text point con claim']
premise_id = self._add_premise(text=premise_text, stance="Con")
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
def remove_duplicates(
premises: pandas.DataFrame,
claims: pandas.DataFrame,
assignments: pandas.DataFrame
) -> [pandas.DataFrame, pandas.DataFrame, pandas.DataFrame]:
"""
Remove duplicate premises and claims (w.r.t. text).
Update assignments:
- ids that belong to a duplicate have to be updated to the remaining id.
- then, duplicate assignments are removed
:param premises:
The premises.
:param claims:
The claims.
:param assignments:
The assignments.
:return:
The unique premises, claims and assignments.
"""
# extend assignments to have the premise and the claim text in df
ass_extended = pandas.merge(assignments, premises, how='inner', on="premise_id")
ass_extended = pandas.merge(ass_extended, claims, how='inner', on="claim_id")
    # drop duplicates in claims and premises (first occurrence is kept)
claims_df = claims.drop_duplicates(subset=["claim_text"])
premises_df = premises.drop_duplicates(subset=["premise_text"])
# extend assignments again by the now unique claim and premise text
ass_extended = pandas.merge(ass_extended, claims_df, how='inner', on="claim_text")
ass_extended = pandas.merge(ass_extended, premises_df, how='inner', on="premise_text")
# the newly added claim and premise ids are now the ids of the remaining ones
ass_extended = ass_extended[["premise_id_y", "claim_id_y"]]
# rename
ass_extended = ass_extended.rename(columns={"claim_id_y": "claim_id", "premise_id_y": "premise_id"})
# now drop all duplicate assignments
assignments_df = ass_extended.drop_duplicates(subset=["claim_id", "premise_id"])
return premises_df, claims_df, assignments_df
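# Minimal usage sketch (not part of the original module): exercises
# remove_duplicates() on tiny, made-up frames so the ID remapping is easy to
# follow. All texts and IDs below are hypothetical, not taken from any corpus.
if __name__ == '__main__':
    toy_premises = pandas.DataFrame([
        dict(premise_id=0, premise_text='a'),
        dict(premise_id=1, premise_text='a'),  # duplicate of premise 0
        dict(premise_id=2, premise_text='b'),
    ])
    toy_claims = pandas.DataFrame([
        dict(claim_id=0, claim_text='c'),
        dict(claim_id=1, claim_text='c'),  # duplicate of claim 0
    ])
    toy_assignments = pandas.DataFrame([
        dict(premise_id=0, claim_id=0),
        dict(premise_id=1, claim_id=1),
        dict(premise_id=2, claim_id=1),
    ])
    unique_premises, unique_claims, unique_assignments = remove_duplicates(
        toy_premises, toy_claims, toy_assignments)
    # Expect 2 unique premises, 1 unique claim and 2 unique assignments, all
    # rewritten to point at the surviving (first-seen) IDs.
    print(unique_premises, unique_claims, unique_assignments, sep='\n')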
| 35.865079
| 111
| 0.594822
|
fd01d76c124c6b98976a2bf8e2bafd7ff83a384d
| 114
|
py
|
Python
|
testsmainnet/loantoken/test_LoanTokenLogicStandardEvents.py
|
DryptoBZX/contractsV2
|
3ee0b7669902ff6b9422440289ddc52f679e636b
|
[
"Apache-2.0"
] | 177
|
2020-06-13T01:41:04.000Z
|
2022-03-28T06:26:53.000Z
|
testsmainnet/loantoken/test_LoanTokenLogicStandardEvents.py
|
DryptoBZX/contractsV2
|
3ee0b7669902ff6b9422440289ddc52f679e636b
|
[
"Apache-2.0"
] | 31
|
2020-08-14T14:30:37.000Z
|
2022-03-15T15:36:25.000Z
|
testsmainnet/loantoken/test_LoanTokenLogicStandardEvents.py
|
DryptoBZX/contractsV2
|
3ee0b7669902ff6b9422440289ddc52f679e636b
|
[
"Apache-2.0"
] | 38
|
2020-06-24T22:24:40.000Z
|
2022-03-26T00:27:14.000Z
|
#!/usr/bin/python3
import pytest
from brownie import *
from fixedint import *
#from helpers import setupLoanPool
| 16.285714
| 34
| 0.789474
|
e13c1aa6efa29f621cc4ecddcfaacae59df4ef11
| 3,393
|
py
|
Python
|
examples/tutorials/contour_map.py
|
shahid-0/pygmt
|
fc8a30a99b81c4aa092e38669e5e4a39411ca78d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/tutorials/contour_map.py
|
shahid-0/pygmt
|
fc8a30a99b81c4aa092e38669e5e4a39411ca78d
|
[
"BSD-3-Clause"
] | 24
|
2021-02-12T08:12:30.000Z
|
2022-02-08T13:04:23.000Z
|
examples/tutorials/contour_map.py
|
shahid-0/pygmt
|
fc8a30a99b81c4aa092e38669e5e4a39411ca78d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Creating a map with contour lines
=================================
Plotting a contour map is handled by :meth:`pygmt.Figure.grdcontour`.
.. note::
This tutorial assumes the use of a Python notebook, such as IPython or Jupyter Notebook.
To see the figures while using a Python script instead, use
``fig.show(method="external")`` to display the figure in the default PDF viewer.
To save the figure, use ``fig.savefig("figname.pdf")`` where ``"figname.pdf"``
is the desired name and file extension for the saved figure.
"""
# sphinx_gallery_thumbnail_number = 5
import pygmt
# Load sample earth relief data
grid = pygmt.datasets.load_earth_relief(resolution="05m", region=[-92.5, -82.5, -3, 7])
########################################################################################
# Create contour plot
# -------------------
#
# The :meth:`pygmt.Figure.grdcontour` method takes the grid input.
# It plots annotated contour lines, which are thicker and have the
# elevation/depth written on them, and unannotated contour lines.
# In the example below, the default contour line intervals are 500 meters,
# with an annotated contour line every 1000 meters.
# By default, it plots the map with the
# equidistant cylindrical projection and with no frame.
fig = pygmt.Figure()
fig.grdcontour(grid=grid)
fig.show()
########################################################################################
# Contour line settings
# ---------------------
#
# Use the ``annotation`` and ``interval`` arguments to adjust contour line intervals.
# In the example below, there are contour intervals every 250 meters and
# annotated contour lines every 1,000 meters.
fig = pygmt.Figure()
fig.grdcontour(
annotation=1000,
interval=250,
grid=grid,
)
fig.show()
########################################################################################
# Contour limits
# --------------
#
# The ``limit`` argument sets the minimum and maximum values for the contour lines.
# The argument takes the low and high values,
# and is either a list (as below) or a string ``limit="-4000/-2000"``.
fig = pygmt.Figure()
fig.grdcontour(
annotation=1000,
interval=250,
grid=grid,
limit=[-4000, -2000],
)
fig.show()
########################################################################################
# Map settings
# ------------
#
# The :meth:`pygmt.Figure.grdcontour` method accepts additional arguments,
# including setting the projection and frame.
fig = pygmt.Figure()
fig.grdcontour(
annotation=1000,
interval=250,
grid=grid,
limit=[-4000, -2000],
projection="M10c",
frame=True,
)
fig.show()
########################################################################################
# Adding a colormap
# -----------------
#
# The :meth:`pygmt.Figure.grdimage` method can be used to add a
# colormap to the contour map. It must be called prior to
# :meth:`pygmt.Figure.grdcontour` to keep the contour lines visible on the final map.
# If the ``projection`` argument is specified in the :meth:`pygmt.Figure.grdimage`
# method, it does not need to be repeated in the :meth:`pygmt.Figure.grdcontour` method.
fig = pygmt.Figure()
fig.grdimage(
grid=grid,
cmap="haxby",
projection="M10c",
frame=True,
)
fig.grdcontour(
annotation=1000,
interval=250,
grid=grid,
limit=[-4000, -2000],
)
fig.show()
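# As mentioned in the note at the top of this tutorial, the finished figure can
# also be written to disk instead of (or in addition to) being shown;
# "contour_map.png" below is only an example file name.
# fig.savefig("contour_map.png")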
| 29.763158
| 92
| 0.59328
|
57874dc49e6fa803f1060da59c3c2474d42fb081
| 481
|
py
|
Python
|
gh/utils.py
|
voidabhi/GrowthHackersAPI
|
cf0b99dab24a2bb0bb0dc6ae09b2460ae950fedd
|
[
"MIT"
] | 6
|
2015-03-10T18:14:45.000Z
|
2019-04-24T05:56:56.000Z
|
gh/utils.py
|
voidabhi/GrowthHackersAPI
|
cf0b99dab24a2bb0bb0dc6ae09b2460ae950fedd
|
[
"MIT"
] | 10
|
2015-01-01T14:18:05.000Z
|
2016-05-11T18:04:45.000Z
|
gh/utils.py
|
voidabhi/GrowthHackersAPI
|
cf0b99dab24a2bb0bb0dc6ae09b2460ae950fedd
|
[
"MIT"
] | 3
|
2016-05-03T13:24:54.000Z
|
2019-04-24T05:57:07.000Z
|
import requests
from bs4 import BeautifulSoup
from constants import BASE_URL
def get_soup(page=''):
content = ''
try:
content = requests.get('%s/%s/' % (BASE_URL,page)).text
except requests.exceptions.ConnectionError as e:
content = requests.get('%s/%s' % (BASE_URL,page)).text
finally:
return BeautifulSoup(content.encode('utf8'))
def get_user_soup(user_id=''):
"""
Returns a bs4 object of the requested user
"""
return get_soup(page='member/%s'%user_id)
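# Usage sketch (the member id below is a placeholder, not a real account):
#   soup = get_user_soup('some-member-id')
#   print(soup.title)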
| 21.863636
| 57
| 0.704782
|
5bc3660e6bb718e5455b5a2e094cdab088374c52
| 9,944
|
py
|
Python
|
pims/spe_stack.py
|
nkeim/pims
|
3c8f7832113517a37ddd65a2803a6b17f9c33e4c
|
[
"BSD-3-Clause"
] | 2
|
2021-01-31T23:58:44.000Z
|
2021-09-15T06:10:06.000Z
|
pims/spe_stack.py
|
nkeim/pims
|
3c8f7832113517a37ddd65a2803a6b17f9c33e4c
|
[
"BSD-3-Clause"
] | null | null | null |
pims/spe_stack.py
|
nkeim/pims
|
3c8f7832113517a37ddd65a2803a6b17f9c33e4c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import warnings
import numpy as np
from .frame import Frame
from .base_frames import FramesSequence
class Spec(object):
"""SPE file specification data
Tuples of (offset, datatype, count), where offset is the offset in the SPE
file and datatype is the datatype as used in `numpy.fromfile`()
`data_start` is the offset of actual image data.
`dtypes` translates SPE datatypes (0...4) to numpy ones, e. g. dtypes[0]
is dtype("<f") (which is np.float32).
`controllers` maps the `type` metadata to a human readable name
`readout_modes` maps the `readoutMode` metadata to something human readable
although this may not be accurate since there is next to no documentation
to be found.
"""
metadata = {
#essential information
"datatype": (108, "<h"), #dtypes
"xdim": (42, "<H"),
"ydim": (656, "<H"),
"NumFrames": (1446, "<i"),
#ROI information
"NumROIsInExperiment": (1488, "<h"),
"NumROI": (1510, "<h"),
"ROIs": (1512, np.dtype([("startx", "<H"),
("endx", "<H"),
("groupx", "<H"),
("starty", "<H"),
("endy", "<H"),
("groupy", "<H")]), 10),
#chip-related sizes
"xDimDet": (6, "<H"),
"yDimDet": (18, "<H"),
"VChipXdim": (14, "<h"),
"VChipYdim": (16, "<h"),
#other stuff
"ControllerVersion": (0, "<h"),
"LogicOutput": (2, "<h"),
"AmpHiCapLowNoise": (4, "<H"), #enum?
"mode": (8, "<h"), #enum?
"exp_sec": (10, "<f"),
"date": (20, "<10S"),
"DetTemperature": (36, "<f"),
"DetType": (40, "<h"),
"stdiode": (44, "<h"),
"DelayTime": (46, "<f"),
"ShutterControl": (50, "<H"), #normal, disabled open, disabled closed
#but which one is which?
"AbsorbLive": (52, "<h"), #bool?
"AbsorbMode": (54, "<H"),
"CanDoVirtualChipFlag": (56, "<h"), #bool?
"ThresholdMinLive": (58, "<h"), #bool?
"ThresholdMinVal": (60, "<f"),
"ThresholdMinLive": (64, "<h"), #bool?
"ThresholdMinVal": (66, "<f"),
"ExperimentTimeLocal": (172, "<7S"),
"ExperimentTimeUTC": (179, "<7S"),
"ADCoffset": (188, "<H"),
"ADCrate": (190, "<H"),
"ADCtype": (192, "<H"),
"ADCresolution": (194, "<H"),
"ADCbitAdjust": (196, "<H"),
"gain": (198, "<H"),
"comments": (200, "<80S", 5),
"geometric": (600, "<H"), #flags
"swversion": (688, "<16S"),
"spare4": (742, "<436S"),
"XPrePixels": (98, "<h"),
"XPostPixels": (100, "<h"),
"YPrePixels": (102, "<h"),
"YPostPixels": (104, "<h"),
"ReadoutTime": (672, "<f"),
"type": (704, "<h"), #controllers
"clkspd_us": (1428, "<f"),
"readoutMode": (1480, "<H"), #readout_modes
"WindowSize": (1482, "<H"),
"file_header_ver": (1992, "<f")
}
data_start = 4100
dtypes = [np.dtype("<f"), np.dtype("<i"), np.dtype("<h"),
np.dtype("<H"), np.dtype("<I")]
controllers = [
"new120 (Type II)", "old120 (Type I)", "ST130", "ST121", "ST138",
"DC131 (PentaMax)", "ST133 (MicroMax/Roper)", "ST135 (GPIB)", "VTCCD",
"ST116 (GPIB)", "OMA3 (GPIB)", "OMA4"
]
#This was gathered from random places on the internet and own experiments
#with the camera. May not be accurate.
readout_modes = ["full frame", "frame transfer", "kinetics"]
#do not decode the following metadata keys into strings, but leave them
#as byte arrays
no_decode = ["spare4"]
class SpeStack(FramesSequence):
"""Read image data from SPE files
Attributes
----------
default_char_encoding : string
Default character encoding used to decode metadata strings. This is a
class attribute. By setting `SpeStack.default_char_encoding =
"my_encoding"`, "my_encoding" will be used as a default in all SpeStack
instances thereafter, unless a different one is explicitly passed to
the constructor. Defaults to "latin1".
metadata : dict
Contains additional metadata.
"""
default_char_encoding = "latin1"
@classmethod
def class_exts(cls):
return {"spe"} | super(SpeStack, cls).class_exts()
def __init__(self, filename, char_encoding=None, check_filesize=True):
"""Create an iterable object that returns image data as numpy arrays
Arguments
---------
filename : string
Name of the SPE file
char_encoding : str or None, optional
            Specifies what character encoding is used to decode metadata
strings. If None, use the `default_char_encoding` class attribute.
Defaults to None.
check_filesize : bool, optional
The number of frames in an SPE file should be recorded in the
file's header. Some software fails to do so correctly. If
`check_filesize` is `True`, calculate the number of frames from
the file size. A warning is emitted if this doesn't match the
number of frames from the file header. Defaults to True.
"""
self._filename = filename
self._file = open(filename, "rb")
self._char_encoding = (char_encoding if char_encoding is not None
else self.default_char_encoding)
### Read metadata ###
self.metadata = {}
#Decode each string from the numpy array read by np.fromfile
decode = np.vectorize(lambda x: x.decode(self._char_encoding))
for name, sp in Spec.metadata.items():
self._file.seek(sp[0])
cnt = (1 if len(sp) < 3 else sp[2])
v = np.fromfile(self._file, dtype=sp[1], count=cnt)
if v.dtype.kind == "S" and name not in Spec.no_decode:
#silently ignore string decoding failures
try:
v = decode(v)
except:
pass
if cnt == 1:
#for convenience, if the array contains only one single entry,
#return this entry itself.
v = np.asscalar(v)
self.metadata[name] = v
### Some metadata is "special", deal with it
#Determine data type
self._dtype = Spec.dtypes[self.metadata.pop("datatype")]
#movie dimensions
self._width = self.metadata.pop("xdim")
self._height = self.metadata.pop("ydim")
self._len = self.metadata.pop("NumFrames")
if check_filesize:
            # Some software writes incorrect `NumFrames` metadata
# Use the file size to determine the number of frames
fsz = os.path.getsize(filename)
l = fsz - Spec.data_start
l //= self._width * self._height * self._dtype.itemsize
if l != self._len:
warnings.warn("Number of frames according to file header "
"does not match the size of file " +
filename + ".")
self._len = min(l, self._len)
#The number of ROIs is given in the SPE file. Only return as many
#ROIs as given
num_rois = self.metadata.pop("NumROI", None)
num_rois = (1 if num_rois < 1 else num_rois)
self.metadata["ROIs"] = self.metadata["ROIs"][:num_rois]
#chip sizes
self.metadata["ChipSize"] = (self.metadata.pop("xDimDet", None),
self.metadata.pop("yDimDet", None))
self.metadata["VirtChipSize"] = (self.metadata.pop("VChipXdim", None),
self.metadata.pop("VChipYdim", None))
#geometric operations
g = []
f = self.metadata.pop("geometric", 0)
if f & 1:
g.append("rotate")
if f & 2:
g.append("reverse")
if f & 4:
g.append("flip")
self.metadata["geometric"] = g
#Make some additional information more human-readable
t = self.metadata["type"]
if 1 <= t <= len(Spec.controllers):
self.metadata["type"] = Spec.controllers[t - 1]
else:
self.metadata.pop("type", None)
m = self.metadata["readoutMode"]
if 1 <= m <= len(Spec.readout_modes):
self.metadata["readoutMode"] = Spec.readout_modes[m - 1]
else:
self.metadata.pop("readoutMode", None)
@property
def frame_shape(self):
return self._height, self._width
def __len__(self):
return self._len
def get_frame(self, j):
if j >= self._len:
raise ValueError("Frame number {} out of range.".format(j))
self._file.seek(Spec.data_start
+ j*self._width*self._height*self.pixel_type.itemsize)
data = np.fromfile(self._file, dtype=self.pixel_type,
count=self._width*self._height)
return Frame(data.reshape(self._height, self._width),
frame_no=j, metadata=self.metadata)
def close(self):
"""Clean up and close file"""
super(SpeStack, self).close()
self._file.close()
@property
def pixel_type(self):
return self._dtype
def __repr__(self):
return """<Frames>
Source: {filename}
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self._width,
h=self._height,
count=self._len,
filename=self._filename,
dtype=self._dtype)
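# Minimal usage sketch (not part of the original module): open an SPE file given
# on the command line and print basic information about it. No particular file
# name is assumed here.
if __name__ == "__main__":  # pragma: no cover
    import sys
    if len(sys.argv) > 1:
        stack = SpeStack(sys.argv[1], check_filesize=True)
        print(repr(stack))
        print("readout mode:", stack.metadata.get("readoutMode"))
        stack.close()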
| 36.558824
| 79
| 0.536806
|
3e98d847c563aff1a6db1d21a85858dfb8b15f58
| 10,051
|
py
|
Python
|
depthai_sdk/src/depthai_sdk/utils.py
|
B-AROL-O/depthai
|
ad238dc652cafff1d5c02e89b5ba2f31ab483143
|
[
"MIT"
] | null | null | null |
depthai_sdk/src/depthai_sdk/utils.py
|
B-AROL-O/depthai
|
ad238dc652cafff1d5c02e89b5ba2f31ab483143
|
[
"MIT"
] | null | null | null |
depthai_sdk/src/depthai_sdk/utils.py
|
B-AROL-O/depthai
|
ad238dc652cafff1d5c02e89b5ba2f31ab483143
|
[
"MIT"
] | null | null | null |
import importlib
import sys
from pathlib import Path
import urllib.request
import cv2
import numpy as np
import depthai as dai
import datetime as dt
from heapq import heappop, heappush
import threading
def cosDist(a, b):
"""
Calculates cosine distance - https://en.wikipedia.org/wiki/Cosine_similarity
"""
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
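# Worked examples (values follow directly from the formula above):
#   cosDist([1, 0], [0, 1]) -> 0.0 (orthogonal vectors)
#   cosDist([1, 2], [2, 4]) -> ~1.0 (parallel vectors, up to floating-point rounding)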
def frameNorm(frame, bbox):
"""
    Maps bounding box coordinates (0..1) to pixel values on the frame
Args:
frame (numpy.ndarray): Frame to which adjust the bounding box
bbox (list): list of bounding box points in a form of :code:`[x1, y1, x2, y2, ...]`
Returns:
list: Bounding box points mapped to pixel values on frame
"""
normVals = np.full(len(bbox), frame.shape[0])
normVals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
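# Worked example: for a 640x480 BGR frame (shape (480, 640, 3)),
# frameNorm(frame, [0.1, 0.2, 0.5, 0.6]) returns [64, 96, 320, 288], i.e.
# x coordinates are scaled by the width (640) and y coordinates by the height (480).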
def toPlanar(arr: np.ndarray, shape: tuple = None) -> np.ndarray:
"""
Converts interleaved frame into planar
Args:
arr (numpy.ndarray): Interleaved frame
shape (tuple, optional): If provided, the interleaved frame will be scaled to specified shape before converting into planar
Returns:
numpy.ndarray: Planar frame
"""
if shape is None:
return arr.transpose(2, 0, 1)
return cv2.resize(arr, shape).transpose(2, 0, 1)
def toTensorResult(packet):
"""
Converts NN packet to dict, with each key being output tensor name and each value being correctly reshaped and converted results array
Useful as a first step of processing NN results for custom neural networks
Args:
packet (depthai.NNData): Packet returned from NN node
Returns:
dict: Dict containing prepared output tensors
"""
data = {}
for tensor in packet.getRaw().tensors:
if tensor.dataType == dai.TensorInfo.DataType.INT:
data[tensor.name] = np.array(packet.getLayerInt32(tensor.name)).reshape(tensor.dims)
elif tensor.dataType == dai.TensorInfo.DataType.FP16:
data[tensor.name] = np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims)
elif tensor.dataType == dai.TensorInfo.DataType.I8:
data[tensor.name] = np.array(packet.getLayerUInt8(tensor.name)).reshape(tensor.dims)
else:
print("Unsupported tensor layer type: {}".format(tensor.dataType))
return data
def merge(source:dict, destination:dict):
"""
Utility function to merge two dictionaries
.. code-block:: python
a = { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1' } } }
b = { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5' } } }
print(merge(b, a))
# { 'first' : { 'all_rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5' } } }
Args:
source (dict): first dict to merge
destination (dict): second dict to merge
Returns:
dict: merged dict
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge(value, node)
else:
destination[key] = value
return destination
def loadModule(path: Path):
"""
Loads module from specified path. Used internally e.g. to load a custom handler file from path
Args:
path (pathlib.Path): path to the module to be loaded
Returns:
module: loaded module from provided path
"""
spec = importlib.util.spec_from_file_location(path.stem, str(path.absolute()))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def getDeviceInfo(deviceId=None, debug=False):
"""
Find a correct :obj:`depthai.DeviceInfo` object, either matching provided :code:`deviceId` or selected by the user (if multiple devices available)
Useful for almost every app where there is a possibility of multiple devices being connected simultaneously
Args:
deviceId (str, optional): Specifies device MX ID, for which the device info will be collected
Returns:
depthai.DeviceInfo: Object representing selected device info
Raises:
RuntimeError: if no DepthAI device was found or, if :code:`deviceId` was specified, no device with matching MX ID was found
ValueError: if value supplied by the user when choosing the DepthAI device was incorrect
"""
deviceInfos = []
if debug:
deviceInfos = dai.XLinkConnection.getAllConnectedDevices()
else:
deviceInfos = dai.Device.getAllAvailableDevices()
if len(deviceInfos) == 0:
raise RuntimeError("No DepthAI device found!")
else:
print("Available devices:")
for i, deviceInfo in enumerate(deviceInfos):
print(f"[{i}] {deviceInfo.getMxId()} [{deviceInfo.state.name}]")
if deviceId == "list":
raise SystemExit(0)
elif deviceId is not None:
matchingDevice = next(filter(lambda info: info.getMxId() == deviceId, deviceInfos), None)
if matchingDevice is None:
raise RuntimeError(f"No DepthAI device found with id matching {deviceId} !")
return matchingDevice
elif len(deviceInfos) == 1:
return deviceInfos[0]
else:
val = input("Which DepthAI Device you want to use: ")
try:
return deviceInfos[int(val)]
except:
raise ValueError("Incorrect value supplied: {}".format(val))
def showProgress(curr, max):
"""
Print progressbar to stdout. Each call to this method will write exactly to the same line, so usually it's used as
.. code-block:: python
print("Staring processing")
while processing:
showProgress(currProgress, maxProgress)
print(" done") # prints in the same line as progress bar and adds a new line
print("Processing finished!")
Args:
curr (int): Current position on progress bar
max (int): Maximum position on progress bar
"""
done = int(50 * curr / max)
sys.stdout.write("\r[{}{}] ".format('=' * done, ' ' * (50-done)) )
sys.stdout.flush()
def downloadYTVideo(video, outputDir=None):
"""
    Downloads a video from YouTube and returns the path to the video. Will choose the best resolution if possible.
Args:
video (str): URL to YouTube video
outputDir (pathlib.Path, optional): Path to directory where youtube video should be downloaded.
Returns:
pathlib.Path: Path to downloaded video file
Raises:
RuntimeError: thrown when video download was unsuccessful
"""
def progressFunc(stream, chunk, bytesRemaining):
showProgress(stream.filesize - bytesRemaining, stream.filesize)
try:
from pytube import YouTube
except ImportError as ex:
raise RuntimeError("Unable to use YouTube video due to the following import error: {}".format(ex))
path = None
for _ in range(10):
try:
path = YouTube(video, on_progress_callback=progressFunc)\
.streams\
.order_by('resolution')\
.desc()\
.first()\
.download(output_path=outputDir)
except urllib.error.HTTPError:
# TODO remove when this issue is resolved - https://github.com/pytube/pytube/issues/990
# Often, downloading YT video will fail with 404 exception, but sometimes it's successful
pass
else:
break
if path is None:
raise RuntimeError("Unable to download YouTube video. Please try again")
return path
def cropToAspectRatio(frame, size):
"""
    Crops the frame to the desired aspect ratio and then scales it down to the desired size
Args:
frame (numpy.ndarray): Source frame that will be cropped
size (tuple): Desired frame size (width, height)
Returns:
numpy.ndarray: Cropped frame
"""
shape = frame.shape
h = shape[0]
w = shape[1]
currentRatio = w / h
newRatio = size[0] / size[1]
# Crop width/height to match the aspect ratio needed by the NN
if newRatio < currentRatio: # Crop width
# Use full height, crop width
newW = (newRatio/currentRatio) * w
crop = int((w - newW) / 2)
return frame[:, crop:w-crop]
else: # Crop height
# Use full width, crop height
newH = (currentRatio/newRatio) * h
crop = int((h - newH) / 2)
return frame[crop:h-crop, :]
def resizeLetterbox(frame, size):
"""
Transforms the frame to meet the desired size, preserving the aspect ratio and adding black borders (letterboxing)
Args:
frame (numpy.ndarray): Source frame that will be resized
size (tuple): Desired frame size (width, height)
Returns:
numpy.ndarray: Resized frame
"""
border_v = 0
border_h = 0
if (size[1] / size[0]) >= (frame.shape[0] / frame.shape[1]):
border_v = int((((size[1] / size[0]) * frame.shape[1]) - frame.shape[0]) / 2)
else:
border_h = int((((size[0] / size[1]) * frame.shape[0]) - frame.shape[1]) / 2)
frame = cv2.copyMakeBorder(frame, border_v, border_v, border_h, border_h, cv2.BORDER_CONSTANT, 0)
return cv2.resize(frame, size)
def createBlankFrame(width, height, rgb_color=(0, 0, 0)):
"""
Create new image(numpy array) filled with certain color in RGB
Args:
width (int): New frame width
height (int): New frame height
rgb_color (tuple, Optional): Specify frame fill color in RGB format (default (0,0,0) - black)
Returns:
numpy.ndarray: New frame filled with specified color
"""
# Create black blank image
image = np.zeros((height, width, 3), np.uint8)
# Since OpenCV uses BGR, convert the color first
color = tuple(reversed(rgb_color))
# Fill image with color
image[:] = color
return image
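# Minimal self-test sketch (not part of the original module): exercises the pure
# image helpers above on a synthetic frame, so it runs without a device attached.
if __name__ == "__main__":
    demo = createBlankFrame(640, 480, rgb_color=(255, 0, 0))  # solid red frame
    print("blank frame:", demo.shape, demo.dtype)
    cropped = cropToAspectRatio(demo, (300, 300))  # 1:1 target -> width is cropped
    print("cropped:", cropped.shape)
    boxed = resizeLetterbox(demo, (300, 300))  # same target size, letterboxed instead
    print("letterboxed:", boxed.shape)
    planar = toPlanar(demo, (300, 300))  # interleaved HWC -> planar CHW
    print("planar:", planar.shape)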
| 33.615385
| 150
| 0.63327
|
5fdb3b356d97c24ea2ca7d6c38f8c9ba57c76f4c
| 1,604
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_combined07.py
|
edparcell/XlsxWriter
|
d6a5df232ac0091017ae5c65f592bcc776d296ea
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-01-09T19:43:43.000Z
|
2019-01-09T19:43:43.000Z
|
xlsxwriter/test/comparison/test_chart_combined07.py
|
edparcell/XlsxWriter
|
d6a5df232ac0091017ae5c65f592bcc776d296ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_combined07.py
|
edparcell/XlsxWriter
|
d6a5df232ac0091017ae5c65f592bcc776d296ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_combined07.xlsx')
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:dispBlanksAs']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({'type': 'column'})
chart2 = workbook.add_chart({'type': 'scatter'})
chart1.axis_ids = [81267328, 81297792]
chart2.axis_ids = [81267328, 81297792]
data = [
[2, 3, 4, 5, 6],
[20, 25, 10, 10, 20],
[5, 10, 15, 10, 5],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart1.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5'
})
chart2.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5'
})
chart1.combine(chart2)
worksheet.insert_chart('E9', chart1)
workbook.close()
self.assertExcelEqual()
| 25.460317
| 79
| 0.548628
|
39989510aaa2d1640e0855dc25c3c2d2ef3af6f6
| 7,818
|
py
|
Python
|
Vault7/Lost-in-Translation/windows/ReplayWizard.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | 46
|
2017-05-15T11:15:08.000Z
|
2018-07-02T03:32:52.000Z
|
Vault7/Lost-in-Translation/windows/ReplayWizard.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | null | null | null |
Vault7/Lost-in-Translation/windows/ReplayWizard.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | 24
|
2017-05-17T03:26:17.000Z
|
2018-07-09T07:00:50.000Z
|
from __future__ import print_function
import glob
import math
import os
import subprocess
import sys
import time
import traceback
SLEEP = 5
LETTERS = 'BCDEFGHIJKLMNOPQRSTUVWXYZ'
REFRESH_LIST = 'REFRESH LIST'
REPLAY_DRIVE = 'D:\\'
MAIN_HEADER = "\nReplayWizard 2013-05-09\n\nLEAVE THIS WINDOW OPEN WHILE YOU WORK!\n\nIf you accidently close this window, re-run this script.\n\nThe following menu will help you launch an appropriate replay\nview for each shared drive and project. Use 'REFRESH LIST' to \nupdate the list in cases where the project directories have not \nyet been created on the WinOp station.\n\nWhen the op is complete, close all DSZ windows and choose QUIT.\n"
def main():
print(MAIN_HEADER)
first_run = True
sentinel_children = []
while True:
drive_menu = [('%s:' % x) for x in LETTERS if glob.glob(('%s:/DSZOpsDisk*' % x))]
if (first_run and (len(drive_menu) == 1)):
choice = drive_menu[0]
print(('Only one share found, selecting %s drive...' % choice))
first_run = False
else:
choice = menu(drive_menu, quitmsg='QUIT', text="Select the drive or share with the Windows OPS disk you'd like to use:")
if (choice is None):
for child in sentinel_children:
print(('Stopping sentinel process: %s' % child.pid))
child.terminate()
break
if (choice == REFRESH_LIST):
continue
ops_disk_path = glob.glob(('%s/DSZOpsDisk*' % choice))[0]
replay_disk_path = os.path.join(REPLAY_DRIVE, 'ReplayDisk')
if (not os.path.exists(replay_disk_path)):
create_replay_disk(ops_disk_path, replay_disk_path)
logs_path = os.path.join(os.path.dirname(ops_disk_path), 'Logs')
if (not os.path.exists(logs_path)):
print(("\nCouldn't find the logs dir at %s" % logs_path))
print('\nTry again when the WinOP DSZ GUI has launched.\n')
continue
output_dir = os.path.dirname(replay_disk_path)
sentinel_child = sentinel_prompts(ops_disk_path, output_dir)
sentinel_children.append(sentinel_child)
project_menu(ops_disk_path, replay_disk_path)
def create_replay_disk(ops_disk_path, replay_disk_path):
print(('\nCopying replay from %s (may take a moment)...' % ops_disk_path))
os.chdir(ops_disk_path)
script_path = os.path.join(ops_disk_path, 'CreateReplay.py')
proc = subprocess.Popen(('python %s' % script_path), stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
proc.communicate(('%s\n' % replay_disk_path))
def sentinel_prompts(ops_disk_path, output_dir):
get_files = False
if yn_prompt('\nAutomatically rename GetFiles to closely match on-target names?'):
print(('\nFiles will save to %sGetFiles_Renamed' % output_dir))
get_files = True
host_map = False
if yn_prompt('\nAutomatically aggregate network info across targets?'):
print(('\nFiles will save to %shostmap.txt' % output_dir))
host_map = True
if ((not get_files) and (not host_map)):
print()
return
python_file = ('%s/Resources/Ops/PyScripts/Windows/sentinel/fsmon.py' % ops_disk_path)
if (not os.path.exists(python_file)):
print("\nCouldn't find sentinel! Skipping...")
return
command_line = ('python %s --fresh --output-dir %s' % (python_file, output_dir))
if get_files:
command_line += ' --get-files'
if host_map:
command_line += ' --host-table'
logs_path = os.path.join(os.path.dirname(ops_disk_path), 'Logs')
command_line += (' ' + logs_path)
print(('\n\nRunning: %s\n' % command_line))
return subprocess.Popen(command_line)
def project_menu(ops_disk_path, replay_disk_path):
logs_path = os.path.join(os.path.dirname(ops_disk_path), 'Logs')
first_run = True
while True:
projects = [path for path in os.listdir(logs_path) if os.path.isdir(os.path.join(logs_path, path))]
if (first_run and (len(projects) == 1)):
choice = projects[0]
print(('Only one project found, auto-selecting %s...' % choice))
first_run = False
else:
choice = menu(projects, quitmsg='BACK TO SHARES LIST', text='Select a project to open the DSZ GUI:')
if (choice is None):
break
if (choice == REFRESH_LIST):
continue
project_log_dir = os.path.join(logs_path, choice)
write_user_defaults_file(replay_disk_path, normpath(project_log_dir))
print(("\nLaunching GUI for '%s'... " % choice), end='')
script_path = os.path.join(replay_disk_path, 'start_lp.py')
subprocess.Popen(('python %s' % script_path))
print('done.\n')
print(('Sleeping for %s seconds, then you can select another project... ' % SLEEP), end='')
time.sleep(SLEEP)
print('done.\n\n')
def write_user_defaults_file(replay_disk_path, project_log_dir):
config_dir = normpath(os.path.join(replay_disk_path, 'UserConfiguration'))
resource_dir = normpath(os.path.join(replay_disk_path, 'Resources'))
defaults_path = os.path.join(replay_disk_path, 'user.defaults')
with open(defaults_path, 'w') as output:
output.write('BuildType=true\n')
output.write(('LogDir=%s\n' % project_log_dir))
output.write('GuiType=true\n')
output.write('wait.for.output=false\n')
output.write('thread.dump=false\n')
output.write(('ConfigDir=%s\n' % config_dir))
output.write('OpMode=false\n ')
output.write('LoadPrevious=true\n')
output.write(('ResourceDir=%s\n' % resource_dir))
output.write(('OpsDisk=%s\n' % normpath(replay_disk_path)))
output.write('LocalAddress=00000001\n')
output.write('LocalMode=false\n')
output.write('replay.DSZ_KEYWORD=Extended')
def normpath(path):
norm_path = os.path.normpath(path)
return norm_path.replace('\\', '\\\\')
def menu(menu_list, text=None, quitmsg=None):
menu_list = ([REFRESH_LIST] + menu_list)
while True:
optspace = int(math.log(len(menu_list), 10))
if text:
print(('\n' + text))
if quitmsg:
print(((('%' + str(optspace)) + 'd. %s') % (0, quitmsg)))
else:
print('0. Quit')
items = 0
for i in menu_list:
items += 1
print(((('%' + str(optspace)) + 'd. %s') % (items, i)))
result = None
while ((result is None) or (result > len(menu_list)) or (result < 0)):
result = prompt('Enter selection: ')
try:
result = int(result)
except ValueError:
result = None
except TypeError:
result = None
if (result == 0):
return None
return menu_list[(result - 1)]
def prompt(text, default=None):
if default:
text += (' [%s] ' % default)
result = raw_input(text)
if (result == ''):
return (None if (default is None) else str(default))
else:
return result
def yn_prompt(text, default=True):
footer = (' [Y/n] > ' if default else ' [y/N] > ')
result = raw_input((text + footer)).lower().strip()
if (not result):
result = ('y' if default else 'n')
return (result[0].find('n') < 0)
if (__name__ == '__main__'):
try:
main()
except:
print()
traceback.print_exc()
print()
print('An unrecoverable error has occured. Cannot be wizardly for you.')
print()
raw_input('Press enter to exit.')
| 43.19337
| 448
| 0.609363
|
98bc06f63fd720e8b3aa8b9a3f5e58017cd9332c
| 8,104
|
py
|
Python
|
onnx_tf/handlers/backend/scan_mixin.py
|
MemoryChain-CN/onnx-tensorflow
|
e048c0a69b870d661143f561511329dae4acfcfa
|
[
"Apache-2.0"
] | 1
|
2020-06-04T14:16:39.000Z
|
2020-06-04T14:16:39.000Z
|
onnx_tf/handlers/backend/scan_mixin.py
|
MemoryChain-CN/onnx-tensorflow
|
e048c0a69b870d661143f561511329dae4acfcfa
|
[
"Apache-2.0"
] | 1
|
2020-06-10T06:53:21.000Z
|
2020-06-12T08:19:23.000Z
|
onnx_tf/handlers/backend/scan_mixin.py
|
yangchengtest/onnx-tf-atlas
|
7c6772399256cd14b37a1ac4d7bad948583b0034
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from onnx.helper import make_opsetid
import onnx_tf
from onnx_tf.common import data_type
class ScanMixin(object):
@classmethod
def scan(cls, node, input_dict, strict):
current_opset = [make_opsetid(cls.DOMAIN, cls.VERSION)]
body = node.attrs["body"]
# in version 8, node.inputs[0] is the sequence_lens
node_inputs = node.inputs if cls.SINCE_VERSION != 8 else \
node.inputs[1:]
# M
num_scan_inputs = int(node.attrs["num_scan_inputs"])
# N = num_inputs - M
num_state_vars = len(node_inputs) - num_scan_inputs
# K = num_outputs - N
num_scan_outputs = len(node.outputs) - num_state_vars
"""
Function to run subgraph used with tf.scan
"""
def run_subgraph(a, b):
input_values = {}
# set the input values for the subgraph
# set the values for the state variables
for i in range(num_state_vars):
input_values[body.input[i].name] = a[i]
# set the values for the scan inputs
for i in range(num_scan_inputs):
input_values[body.input[i + num_state_vars].name] = b[i]
# get the tensor operations for the onnx graph
tensor_dict = \
onnx_tf.backend.onnx_graph_to_tensorflow_ops(
graph_def=body,
input_values=input_values,
opset=current_opset,
strict=strict)
# return sequence of tensors for every subgraph output
outputs = [tensor_dict[output.name] for output in body.output]
return outputs
scan_input_axes = node.attrs.get("scan_input_axes",
[0] * num_scan_inputs)
scan_input_directions = node.attrs.get("directions"
if cls.SINCE_VERSION == 8 else
"scan_input_directions",
[0] * num_scan_inputs)
scan_output_axes = node.attrs.get("scan_output_axes",
[0] * num_scan_outputs)
scan_output_directions = node.attrs.get("scan_output_directions",
[0] * num_scan_outputs)
    # if version 8, read the sequence_lens from the first input
if cls.SINCE_VERSION == 8:
sequence_lens = input_dict[node.inputs[0]] \
if node.inputs[0] != '' else None
inputs = [input_dict[node_input] for node_input in node_inputs]
scan_inputs = inputs[num_state_vars:]
# loop over all the scan inputs and apply transpose depending
# on input axes provided and also reverse the scan inputs if
# reverse direction for scan is provided
for i in range(num_scan_inputs):
# if input axes are different than 0, use transpose to scan over
# the provided axes
if scan_input_axes[i] != 0:
transpose_perm = cls._calc_transpose_perm_input(
tf.rank(scan_inputs[i]), scan_input_axes[i])
scan_inputs[i] = tf.transpose(scan_inputs[i], transpose_perm)
# check for reverse direction scans
if scan_input_directions[i] == 1:
# version 8 has a batch dimension
axis = 0 if cls.SINCE_VERSION != 8 else 1
scan_inputs[i] = tf.reverse(scan_inputs[i], [axis])
state_vars_init = inputs[:num_state_vars]
scan_outputs_init = []
# generate sequence of zero tensors for all scan outputs
# with the correct shape and dtype
for scan_output in body.output[num_state_vars:]:
tensor_type = scan_output.type.tensor_type
shape = [d.dim_value
if (d.dim_value > 0 and d.dim_param == "") else None
for d in tensor_type.shape.dim]
dtype = data_type.onnx2tf(tensor_type.elem_type)
scan_outputs_init.append(tf.zeros(shape, dtype=dtype))
    # tf.scan initializer is state_vars_init + scan_outputs_init
initializer = state_vars_init + scan_outputs_init
if cls.SINCE_VERSION == 8:
# version == 8
# function to process the batches. it is used with tf.map_fn
def run_batches(x):
# state vars initial values per batch
initial = x[0]
# scan inputs per batch
scan_inputs = x[1]
# sequence length for the batch
seq_len = x[2]
# slice the input to the current sequence len
scan_inputs = [scan_input[:seq_len, ...]
for scan_input in scan_inputs]
# run scan on the current batch
out = tf.scan(run_subgraph, scan_inputs,
initializer=initial + scan_outputs_init)
# pad to the original shape with zeros
paddings = [[0, tf.shape(x[1][0],
out_type=seq_len.dtype)[0] - seq_len]]
for i in range(len(out)):
pads = tf.concat([paddings,
tf.zeros([(tf.rank(out[i]) - 1), 2],
dtype=tf.int32)], axis=0)
out[i] = tf.pad(out[i], pads)
return out
if sequence_lens is None:
# if sequence_lens is None, fill it with the shape of
# the input axis 1
sequence_lens = tf.fill([tf.shape(scan_inputs[0])[0]],
tf.shape(scan_inputs[0],
out_type=tf.int32)[1])
output_types = [data_type.onnx2tf(
output.type.tensor_type.elem_type) for
output in body.output]
# run scan for every batch
out = tf.map_fn(run_batches,
(state_vars_init, scan_inputs, sequence_lens),
dtype=output_types)
state_vars_outputs = []
# extract the final values of the state variables
for state_var in out[:num_state_vars]:
state_vars_outputs.append(tf.map_fn(lambda x: x[0][x[1]-1],
(state_var, sequence_lens),
state_var.dtype))
else:
# version > 8
# run the scan
out = tf.scan(run_subgraph, scan_inputs,
initializer=initializer)
# extract the final values of the state variables
state_vars_outputs = [state_var[tf.shape(state_var)[0]-1]
for state_var in out[:num_state_vars]]
scan_outputs = out[num_state_vars:]
# post process the scan outputs depending on the directions and
# axes provided.
for i in range(num_scan_outputs):
# check for reverse direction scan outputs
if scan_output_directions[i] == 1:
scan_outputs[i] = tf.reverse(scan_outputs[i], [0])
if scan_output_axes[i] != 0:
transpose_perm = cls._calc_transpose_perm_output(
tf.rank(scan_outputs[i]), scan_output_axes[i])
scan_outputs[i] = tf.transpose(scan_outputs[i], transpose_perm)
return state_vars_outputs + scan_outputs
@classmethod
def _calc_transpose_perm_input(cls, rank, axis):
if axis < 0:
axis = rank + axis
return tf.concat([[axis], tf.range(axis), tf.range(axis+1, rank)], 0)
@classmethod
def _calc_transpose_perm_output(cls, rank, axis):
if axis < 0:
axis = rank + axis
return tf.concat([tf.range(1, axis + 1), [0],
tf.range(axis + 1, rank)], 0)
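# Worked examples for the permutation helpers above (plain arithmetic, no graph
# execution needed):
#   _calc_transpose_perm_input(rank=4, axis=2)  -> [2, 0, 1, 3]
#       (moves scan axis 2 to the front before tf.scan runs)
#   _calc_transpose_perm_output(rank=4, axis=2) -> [1, 2, 0, 3]
#       (moves the leading scan axis back to position 2 afterwards)
# Negative axes are first normalized via axis = rank + axis.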
| 42.878307
| 79
| 0.537019
|
7cd897a5bccab010f9653fcabb55d40999b4327e
| 2,465
|
py
|
Python
|
scripts/deprecated/story_predictor_percentiles_annotator.py
|
dwlmt/Story-Untangling
|
c56354e305f06a508b63b913989ff8856e4db5b6
|
[
"Unlicense"
] | 7
|
2020-09-12T22:32:33.000Z
|
2022-02-07T08:37:04.000Z
|
scripts/deprecated/story_predictor_percentiles_annotator.py
|
dwlmt/Story-Untangling
|
c56354e305f06a508b63b913989ff8856e4db5b6
|
[
"Unlicense"
] | 2
|
2021-08-31T15:46:16.000Z
|
2021-09-01T15:19:52.000Z
|
scripts/deprecated/story_predictor_percentiles_annotator.py
|
dwlmt/Story-Untangling
|
c56354e305f06a508b63b913989ff8856e4db5b6
|
[
"Unlicense"
] | 1
|
2021-06-02T09:33:27.000Z
|
2021-06-02T09:33:27.000Z
|
import argparse
from collections import defaultdict
import jsonlines
import numpy as np
from scipy.stats import stats
def main(args):
print(f"Arguments: {args}")
attribute_values_to_rank = defaultdict(lambda: list())
attribute_percentiles = defaultdict(lambda: list())
with jsonlines.open(args["source_json"], mode='r') as reader:
for json_obj in reader:
for attr in args["attributes_to_bucket"]:
if attr in json_obj:
attribute_values_to_rank[attr].append(json_obj[attr])
for k, v in attribute_values_to_rank.items():
attr_value_vec = np.array(v)
        # This corresponds to the cumulative distribution where x% have a value that is lower than or equal to this.
attr_perc_rank = stats.rankdata(attr_value_vec, "max") / len(attr_value_vec)
attribute_percentiles[k].extend(attr_perc_rank.tolist())
attribute_keys_list = attribute_percentiles.keys()
attribute_values_list = attribute_percentiles.values()
attribute_percentiles_combined = []
for values in zip(*attribute_values_list):
percentiles_per_attr = {}
for i, attr in enumerate(attribute_keys_list):
percentiles_per_attr[f"{attr}_percentile"] = values[i]
attribute_percentiles_combined.append(percentiles_per_attr)
with jsonlines.open(args["source_json"], mode='r') as reader:
with jsonlines.open(args["target_json"], mode='w') as writer:
for json_obj, percentiles in zip(reader, attribute_percentiles_combined):
out_json_obj = {**json_obj, **percentiles}
print(out_json_obj)
writer.write(out_json_obj)
parser = argparse.ArgumentParser(
    description='Reads a Story Predictor output and adds percentile buckets to the output')
parser.add_argument('--source-json', required=True, type=str, help="The source JSON lines file.")
parser.add_argument('--target-json', required=True, type=str, help="The target JSON lines file.")
parser.add_argument('--attributes_to_bucket',
default=["neighbour_correct_dot_product", "neighbour_correct_log_probs",
"neighbour_correct_similarity_cosine", "neighbour_correct_distance_l1",
"neighbour_correct_distance_l2"], type=str, nargs='+',
help="A list of attributes to bucket into the percentiles.")
args = parser.parse_args()
main(vars(args))
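# Example invocation (file names are placeholders):
#   python story_predictor_percentiles_annotator.py \
#       --source-json predictions.jsonl --target-json predictions_with_percentiles.jsonl
# Pass --attributes_to_bucket to override the default list of attributes.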
| 43.245614
| 112
| 0.689249
|
570441f03bf5b5b90a0006a92aa560f58c64f9ea
| 1,042
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
RafyAmgadBenjamin/recipe-app-api
|
7b14ec9b422704205819fca20ae7ab1e713e4625
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
RafyAmgadBenjamin/recipe-app-api
|
7b14ec9b422704205819fca20ae7ab1e713e4625
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
RafyAmgadBenjamin/recipe-app-api
|
7b14ec9b422704205819fca20ae7ab1e713e4625
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-10-19 11:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=50)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.931034
| 118
| 0.603647
|
3564c293a5ef2d5a57463dd8a46e0e654f5c9409
| 888
|
py
|
Python
|
data.py
|
songer1993/L42-Practical-2
|
24416ea5b029c448f7ffea861ab914340eb26fa7
|
[
"MIT"
] | null | null | null |
data.py
|
songer1993/L42-Practical-2
|
24416ea5b029c448f7ffea861ab914340eb26fa7
|
[
"MIT"
] | null | null | null |
data.py
|
songer1993/L42-Practical-2
|
24416ea5b029c448f7ffea861ab914340eb26fa7
|
[
"MIT"
] | null | null | null |
from keras.utils import np_utils
import scipy.io
import numpy as np
np.random.seed(0)
def load_data():
rows, cols = 28, 28
nb_classes = 10
DATA_DIR = 'notMNIST_small.mat'
mat = scipy.io.loadmat(DATA_DIR)
X = mat['images']
Y = mat['labels']
    # Move the sample axis (last axis) to the front
X = np.rollaxis(X, 2)
# Reshape and format input
X = X.reshape(X.shape[0], rows, cols, 1)
X = X.astype('float32')
X /= 255.0
    # One-hot encode the labels
Y = Y.astype(int)
Y = np_utils.to_categorical(Y, nb_classes)
# Divide into test and train sets
perm = np.random.permutation(X.shape[0])
train_size = 13000
X_train = X[perm[:train_size]]
X_test = X[perm[train_size:]]
Y_train = Y[perm[:train_size]]
Y_test = Y[perm[train_size:]]
return (X_train, Y_train, X_test, Y_test)
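# Hedged usage sketch (not part of the original module): the returned arrays are shaped
# for a 28x28x1 CNN with a 10-way softmax.
#   X_train, Y_train, X_test, Y_test = load_data()
#   X_train.shape  # (13000, 28, 28, 1)
#   Y_train.shape  # (13000, 10)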
if __name__ == '__main__':
load_data()
pass
| 18.122449
| 46
| 0.620495
|
46b5d9a865f4ad817d09556f18f9722f72d4a75e
| 1,618
|
py
|
Python
|
setup.py
|
samedamci/basedbinpy
|
19352e5ce5c410ca9eeac9d3cb190488c5745258
|
[
"0BSD"
] | null | null | null |
setup.py
|
samedamci/basedbinpy
|
19352e5ce5c410ca9eeac9d3cb190488c5745258
|
[
"0BSD"
] | null | null | null |
setup.py
|
samedamci/basedbinpy
|
19352e5ce5c410ca9eeac9d3cb190488c5745258
|
[
"0BSD"
] | null | null | null |
from setuptools import find_packages, setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), "r", encoding="utf-8") as f:
long_description = f.read()
source_url = "https://github.com/samedamci/basedbinpy"
setup(
name="basedbinpy",
version="0.5.2",
description="Simple python library for basedbin pastebin-like service.",
long_description=long_description,
long_description_content_type="text/markdown",
author="samedamci",
author_email="samedamci@disroot.org",
url=source_url,
project_urls={
"Source": source_url,
"Tracker": f"{source_url}/issues",
},
install_requires=["requests", "bson==0.5.10"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: ISC License (ISCL)",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries",
"Environment :: Console",
"Topic :: Utilities",
"Typing :: Typed",
],
keywords="basedbin library api client",
python_requires=">=3.6",
packages=find_packages(include=["basedbinpy"]),
)
| 35.173913
| 78
| 0.639679
|
e8e2c33469a46d8c678722572d4b94f70d687bcc
| 12,391
|
py
|
Python
|
save_the_change/mixins.py
|
juancferrer/django-save-the-change
|
fc16a74d20dc040725b16849aec93399cbea7672
|
[
"Apache-2.0"
] | null | null | null |
save_the_change/mixins.py
|
juancferrer/django-save-the-change
|
fc16a74d20dc040725b16849aec93399cbea7672
|
[
"Apache-2.0"
] | null | null | null |
save_the_change/mixins.py
|
juancferrer/django-save-the-change
|
fc16a74d20dc040725b16849aec93399cbea7672
|
[
"Apache-2.0"
] | 1
|
2019-08-16T18:21:57.000Z
|
2019-08-16T18:21:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
try:
	from collections.abc import Mapping
except ImportError:  # Python 2
	from collections import Mapping
from copy import copy, deepcopy
from datetime import date, time, datetime, timedelta, tzinfo
from decimal import Decimal
from uuid import UUID
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models import ManyToManyField, ManyToOneRel
__all__ = ('SaveTheChange', 'TrackChanges')
#: A :py:class:`set` listing known immutable types.
IMMUTABLE_TYPES = set(getattr(settings, 'STC_IMMUTABLE_TYPES', (
type(None), bool, float, complex, Decimal,
str, bytes, tuple, frozenset,
date, time, datetime, timedelta, tzinfo,
UUID, int)))
INFINITELY_ITERABLE_IMMUTABLE_TYPES = set(getattr(settings, 'STC_INFINITELY_ITERABLE_IMMUTABLE_TYPES', (str, bytes)))
class DoesNotExist:
"""
It's unlikely, but there could potentially be a time when a field is added
to or removed from an instance. This class represents a field in a state of
	nonexistence, just in case we ever run into it.
"""
pass
def is_mutable(obj):
if type(obj) not in IMMUTABLE_TYPES:
return True
elif type(obj) not in INFINITELY_ITERABLE_IMMUTABLE_TYPES:
try:
for sub_obj in iter(obj):
if is_mutable(sub_obj):
return True
except TypeError:
pass
return False
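# Hedged examples of is_mutable() with the default IMMUTABLE_TYPES (not part of the
# original module):
#   is_mutable((1, 2, 3))    # False: immutable container of immutables
#   is_mutable((1, [2, 3]))  # True:  tuple holding a mutable list
#   is_mutable('text')       # False: str is iterable but treated as immutable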
class BaseChangeTracker(object):
"""
Adds a :py:class:`dict` named :attr:`._changed_fields` to the model, which
stores fields that have changed. The key is the field name, and the value
the original value of the field from the database.
If the value of a field is changed back to its original value, its entry is
removed from :attr:`._changed_fields`. Thus, overhead is kept at a minimum.
A caveat: This can't do anything to help you with
:class:`~django.db.models.ManyToManyField`\s nor reverse relationships, which
is par for the course: they aren't handled by
:meth:`~django.db.models.Model.save`, but are pushed to the database
immediately when changed.
"""
def __init__(self, *args, **kwargs):
super(BaseChangeTracker, self).__init__(*args, **kwargs)
self._mutable_fields = {} #: A :py:class:`dict` storing likely mutable fields.
self._changed_fields = {} #: A :py:class:`dict` storing changed fields.
def __getattribute__(self, name):
"""
Checks the returned value from fields to see if it's known to be
immutable. If it isn't, adds it to :attr:`._mutable_fields` so we know
to push it back to the db. This allows us to cover the case wherein a
mutable value is accessed and then some part of that value is altered.
"""
value = super(BaseChangeTracker, self).__getattribute__(name)
if (
not hasattr(value, '__call__') and
'_mutable_fields' in super(BaseChangeTracker, self).__getattribute__('__dict__')
and name in (field.attname for field in super(BaseChangeTracker, self).__getattribute__('_meta').concrete_fields)
):
# We can't do an isinstance() check here since a subclass could
# violate the immutability promise.
if is_mutable(value):
super(BaseChangeTracker, self).__getattribute__('_mutable_fields')[name] = deepcopy(value)
return value
def __setattr__(self, name, value):
"""
Updates :attr:`._changed_fields` when new values are set for fields.
"""
if hasattr(self, '_changed_fields') and name in super(BaseChangeTracker, self).__getattribute__('_meta')._forward_fields_map:
try:
field = self._meta.get_field(name)
except FieldDoesNotExist:
field = None
if field and not field.hidden and field.__class__ not in (ManyToManyField, ManyToOneRel):
old = self.__dict__.get(field.attname, DoesNotExist)
if old is not DoesNotExist and field.is_relation:
try:
# If we've already got a hydrated object we can stash,
# awesome.
hydrated_old = getattr(self, getattr(self.__class__, field.name).cache_name)
if hydrated_old.pk != old:
hydrated_old = DoesNotExist
except AttributeError:
hydrated_old = DoesNotExist
else:
hydrated_old = DoesNotExist
# A parent's __setattr__ may change value.
super(BaseChangeTracker, self).__setattr__(name, value)
new = self.__dict__.get(field.attname, DoesNotExist)
try:
changed = (old != new)
except Exception: # pragma: no cover (covers naive/aware datetime comparison failure; unreachable in py3)
changed = True
if changed:
if field.attname in self._changed_fields:
if self._changed_fields[field.attname] == new:
# We've changed this field back to its original
# value from the database. No need to push it
# back up.
self._changed_fields.pop(field.attname, None)
if field.attname != field.name:
self._changed_fields.pop(field.name, None)
else:
self._changed_fields[field.attname] = copy(old)
if field.attname != field.name and hydrated_old is not DoesNotExist:
self._changed_fields[field.name] = copy(hydrated_old)
else:
super(BaseChangeTracker, self).__setattr__(name, value)
else:
super(BaseChangeTracker, self).__setattr__(name, value)
def save(self, *args, **kwargs):
"""
Clears :attr:`._changed_fields`.
"""
super(BaseChangeTracker, self).save(*args, **kwargs)
self._mutable_fields = {}
self._changed_fields = {}
class SaveTheChange(BaseChangeTracker):
"""
A model mixin that keeps track of fields that have changed since model
instantiation, and when saved updates only those fields.
If :meth:`~django.db.models.Model.save` is called with ``update_fields``,
the passed ``kwarg`` is given precedence. Similarly, if ``force_insert`` is
set, ``update_fields`` will not be.
"""
def save(self, *args, **kwargs):
"""
Builds and passes the ``update_fields`` kwarg to Django.
"""
if self.pk and hasattr(self, '_changed_fields') and hasattr(self, '_mutable_fields') and 'update_fields' not in kwargs and not kwargs.get('force_insert', False):
kwargs['update_fields'] = (
[key for key, value in self._changed_fields.items()] +
[key for key, value in self._mutable_fields.items() if hasattr(self, key) and getattr(self, key) != value]
)
super(SaveTheChange, self).save(*args, **kwargs)
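# Hedged usage sketch (the Article model is illustrative, not part of this module):
#
#   class Article(SaveTheChange, models.Model):
#       title = models.CharField(max_length=100)
#       body = models.TextField()
#
#   article = Article.objects.get(pk=1)
#   article.title = 'New title'
#   article.save()  # issues an UPDATE with update_fields=['title'] only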
class OldValues(Mapping):
def __init__(self, instance):
self.instance = instance
def __getitem__(self, field_name):
field = self.instance._meta._forward_fields_map[field_name]
try:
if field.attname not in self.instance._changed_fields:
return getattr(self.instance, field.name)
elif field.name in self.instance._changed_fields:
return self.instance._changed_fields[field.name]
elif field.is_relation and self.instance._changed_fields[field.attname] is not None:
try:
self.instance._changed_fields[field.name] = getattr(
self.instance.__class__,
field.name
).get_queryset().get(pk=self.instance._changed_fields[field.attname])
except self.instance.DoesNotExist:
self.instance._changed_fields[field.name] = DoesNotExist
else:
return self.instance._changed_fields[field.attname]
except (AttributeError, KeyError):
raise KeyError(field_name)
def __iter__(self):
for field in self.instance._meta.get_fields():
yield field.name
def __len__(self):
return len(self.instance._meta.get_fields())
def __repr__(self):
return '<OldValues: %s>' % repr(self.instance)
class TrackChanges(BaseChangeTracker):
"""
A model mixin that tracks model fields' values and provide some properties
and methods to work with the old/new values.
"""
@property
def has_changed(self):
"""
A :py:obj:`bool` indicating if any fields have changed.
"""
return bool(self._changed_fields)
@property
def changed_fields(self):
"""
A :py:obj:`set` of changed fields.
"""
return set(self._meta._forward_fields_map[name].name for name in self._changed_fields.keys())
@property
def old_values(self):
"""
A :py:class:`dict` of the old field values.
"""
return OldValues(self)
def revert_fields(self, field_names=None):
"""
Reverts supplied fields to their original values.
:param list fields: Fields to revert.
"""
if not field_names:
for field_name in self.changed_fields:
self.revert_field(field_name)
else:
for field_name in field_names:
self.revert_field(field_name)
def revert_field(self, field_name):
if field_name in self._meta._forward_fields_map:
field = self._meta._forward_fields_map[field_name]
if field.name in self._changed_fields:
# Bypass our __setattr__ since we know what the result will be.
super(BaseChangeTracker, self).__setattr__(field.name, self._changed_fields[field.name])
self._changed_fields.pop(field.name)
if field.attname in self._changed_fields:
super(BaseChangeTracker, self).__setattr__(field.attname, self._changed_fields[field.attname])
# If you don't have a hydrated instance and you set a related
# field to None, the field cache is also set to None. Since
# when reverting a dehydrated instance we only set the pk
# attribute, we have to also clear the cache ourselves if the
# instance in it is None or otherwise incorrect to restore
# expected behavior.
if field.is_relation:
hydrated_old = getattr(self, getattr(self.__class__, field.name).cache_name, DoesNotExist)
if hydrated_old is not DoesNotExist and (hydrated_old is None or hydrated_old.pk != self._changed_fields[field.attname]):
delattr(self, getattr(self.__class__, field.name).cache_name)
self._changed_fields.pop(field.attname)
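# Hedged usage sketch for TrackChanges (model and field names are illustrative):
#
#   article.title = 'New title'
#   article.has_changed               # True
#   article.changed_fields            # {'title'}
#   article.old_values['title']       # the value originally loaded from the database
#   article.revert_fields(['title'])  # restore the original value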
class HideMetaOpts(models.base.ModelBase):
"""
A metaclass that hides added attributes from a class' ``Meta``, since
otherwise Django's fascistic Meta options sanitizer will throw an
exception. Default values can be set with default_meta_opts. By default
only opts defined in default_meta_opts will be hidden from Django; if you
want to hide everything unknown, set hide_unknown_opts to ``True``.
(If you have another mixin that adds to your model's ``Meta``, create a
``metaclass`` that inherits from both this and the other
mixin's ``metaclass``.)
"""
default_meta_opts = {
'update_together': (),
}
hide_unknown_opts = False
def __new__(cls, name, bases, attrs):
if not [b for b in bases if isinstance(b, HideMetaOpts)]:
return super(HideMetaOpts, cls).__new__(cls, name, bases, attrs)
else:
meta_opts = deepcopy(cls.default_meta_opts)
# Deferred fields won't have our model's Meta.
if 'Meta' in attrs and attrs['Meta'].__module__ != 'django.db.models.query_utils':
meta = attrs.get('Meta')
else:
# Meta is at a class level, and could be in any of the bases.
for base in bases:
meta = getattr(base, '_meta', None)
if meta:
break
# If there's no _meta then we're falling back to defaults.
if meta:
for opt, value in vars(meta).items():
if opt not in models.options.DEFAULT_NAMES and (cls.hide_unknown_opts or opt in meta_opts):
meta_opts[opt] = value
delattr(meta, opt)
new_class = super(HideMetaOpts, cls).__new__(cls, name, bases, attrs)
if meta:
for opt in meta_opts:
setattr(meta, opt, meta_opts[opt])
# We theoretically don't have to set this twice, but just in case.
for opt in meta_opts:
setattr(new_class._meta, opt, meta_opts[opt])
return new_class
#class UpdateTogetherModel(BaseChangeTracker, models.Model, six.with_metaclass(HideMetaOpts)):
# """
# A replacement for :class:`~django.db.models.Model` which allows you to
# specify the ``Meta`` attribute ``update_together``: a
# :py:obj:`list`/:py:obj:`tuple` of :py:obj:`list`\s/:py:obj:`tuple`\s
# defining fields that should always be updated together if any of
# them change.
#
# """
#
# def save(self, *args, **kwargs):
# if 'update_fields' in kwargs:
# update_fields = set(kwargs['update_fields'])
#
# for field in kwargs['update_fields']:
# update_fields.update(self._meta.update_together.get(field, []))
#
# kwargs['update_fields'] = list(update_fields)
#
# super(UpdateTogetherModel, self).save(*args, **kwargs)
#
# class Meta:
# abstract = True
| 30.519704
| 163
| 0.708014
|
ec615257f773d92f9143b8760b084827f0900a52
| 6,548
|
py
|
Python
|
protocol/Channel.py
|
Anarchid/uberserver
|
312b69e379e8999c440f1dd6e5c0b00a9932e7b1
|
[
"MIT"
] | null | null | null |
protocol/Channel.py
|
Anarchid/uberserver
|
312b69e379e8999c440f1dd6e5c0b00a9932e7b1
|
[
"MIT"
] | null | null | null |
protocol/Channel.py
|
Anarchid/uberserver
|
312b69e379e8999c440f1dd6e5c0b00a9932e7b1
|
[
"MIT"
] | null | null | null |
from AutoDict import AutoDict
import time
class Channel(AutoDict):
def __init__(self, root, name, id = 0, users=[], admins=[],
ban={}, allow=[], autokick='ban', chanserv=False,
owner='', mutelist={}, antispam=False,
censor=False, antishock=False, topic=None,
key=None, history=False, **kwargs):
self.id = id
self._root = root
self.name = name
self.users = users
self.admins = admins
self.ban = ban
self.allow = allow
self.autokick = autokick
self.chanserv = chanserv
self.owner = owner
self.mutelist = mutelist
self.antispam = antispam
self.censor = censor
self.antishock = antishock
self.topic = topic
self.key = key
self.history = history
self.__AutoDictInit__()
if self._root and chanserv and self._root.chanserv and not name in self._root.channels:
self._root.chanserv.Send('JOIN %s' % self.name)
def broadcast(self, message):
self._root.broadcast(message, self.name)
def channelMessage(self, message):
self.broadcast('CHANNELMESSAGE %s %s' % (self.name, message))
def register(self, client, owner):
self.owner = owner.db_id
def addUser(self, client):
username = client.username
if not username in self.users:
self.users.append(username)
self.broadcast('JOINED %s %s' % (self.name, username))
def removeUser(self, client, reason=None):
chan = self.name
username = client.username
if username in self.users:
self.users.remove(username)
if self.name in client.channels:
client.channels.remove(chan)
if reason and len(reason) > 0:
self._root.broadcast('LEFT %s %s %s' % (chan, username, reason), chan)
else:
self._root.broadcast('LEFT %s %s' % (chan, username), chan)
def isAdmin(self, client):
return client and ('admin' in client.accesslevels)
def isMod(self, client):
return client and (('mod' in client.accesslevels) or self.isAdmin(client))
def isFounder(self, client):
return client and ((client.db_id == self.owner) or self.isMod(client))
def isOp(self, client):
return client and ((client.db_id in self.admins) or self.isFounder(client))
def getAccess(self, client): # return client's security clearance
return 'mod' if self.isMod(client) else\
('founder' if self.isFounder(client) else\
('op' if self.isOp(client) else\
'normal'))
def isMuted(self, client):
return client.db_id in self.mutelist
def getMuteMessage(self, client):
if self.isMuted(client):
m = self.mutelist[client.db_id]
if m['expires'] == 0:
return 'muted forever'
else:
# TODO: move format_time, bin2dec, etc to a utilities class or module
return 'muted for the next %s.' % (client._protocol._time_until(m['expires']))
else:
return 'not muted'
def isAllowed(self, client):
if self.autokick == 'allow':
return (self.isOp(client) or (client.db_id in self.allow)) or 'not allowed here'
elif self.autokick == 'ban':
return (self.isOp(client) or (client.db_id not in self.ban)) or self.ban[client.db_id]
def setTopic(self, client, topic):
self.topic = topic
if topic in ('*', None):
if self.topic:
self.channelMessage('Topic disabled.')
topicdict = {}
		else:
			self.channelMessage('Topic changed.')
			topicdict = {'user':client.username, 'text':topic, 'time':time.time()}
			# Only broadcast CHANNELTOPIC when a topic was actually set; an empty
			# topicdict has no 'time' key.
			self.broadcast('CHANNELTOPIC %s %s %s %s'%(self.name, client.username, topicdict['time'], topic))
		self.topic = topicdict
def setKey(self, client, key):
if key in ('*', None):
if self.key:
self.key = None
self.channelMessage('<%s> unlocked this channel' % client.username)
else:
self.key = key
self.channelMessage('<%s> locked this channel with a password' % client.username)
def setFounder(self, client, target):
if not target: return
self.owner = target.db_id
self.channelMessage("<%s> has just been set as this channel's founder by <%s>" % (target.username, client.username))
def opUser(self, client, target):
if target and not target.db_id in self.admins:
self.admins.append(target.db_id)
self.channelMessage("<%s> has just been added to this channel's operator list by <%s>" % (target.username, client.username))
def deopUser(self, client, target):
if target and target.db_id in self.admins:
self.admins.remove(target.db_id)
self.channelMessage("<%s> has just been removed from this channel's operator list by <%s>" % (target.username, client.username))
def kickUser(self, client, target, reason=''):
if self.isFounder(target): return
if target and target.username in self.users:
target.Send('FORCELEAVECHANNEL %s %s %s' % (self.name, client.username, reason))
self.channelMessage('<%s> has kicked <%s> from the channel%s' % (client.username, target.username, (' (reason: %s)'%reason if reason else '')))
self.removeUser(target, 'kicked from channel%s' % (' (reason: %s)'%reason if reason else ''))
def banUser(self, client, target, reason=''):
if self.isFounder(target): return
if target and not target.db_id in self.ban:
self.ban[target.db_id] = reason
self.kickUser(client, target, reason)
self.channelMessage('<%s> has been banned from this channel by <%s>' % (target.username, client.username))
def unbanUser(self, client, target):
if target and target.db_id in self.ban:
del self.ban[target.db_id]
self.channelMessage('<%s> has been unbanned from this channel by <%s>' % (target.username, client.username))
def allowUser(self, client, target):
		if target and not target.db_id in self.allow:
			self.allow.append(target.db_id)
self.channelMessage('<%s> has been allowed in this channel by <%s>' % (target.username, client.username))
def disallowUser(self, client, target):
		if target and target.db_id in self.allow:
			self.allow.remove(target.db_id)
self.channelMessage('<%s> has been disallowed in this channel by <%s>' % (target.username, client.username))
def muteUser(self, client, target, duration=0, ip=False, quiet=False):
if self.isFounder(target): return
		if target and not target.db_id in self.mutelist:
if not quiet:
self.channelMessage('<%s> has muted <%s>' % (client.username, target.username))
try:
duration = float(duration)*60
if duration < 1:
duration = 0
else:
duration = time.time() + duration
except: duration = 0
self.mutelist[target.db_id] = {'expires':duration, 'ip':ip, 'quiet':quiet}
def unmuteUser(self, client, target):
if target and target.db_id in self.mutelist:
del self.mutelist[target.db_id]
self.channelMessage('<%s> has unmuted <%s>' % (client.username, target.username))
| 35.586957
| 146
| 0.693952
|
87c129a97517ecccbd58feaf1a902c8e697f12de
| 349
|
py
|
Python
|
src/service/main_service.py
|
JoeIOU/metedata_fusion_tools
|
3cf45338c4ae28e043142bf728ee6c91749ff72e
|
[
"Apache-2.0"
] | null | null | null |
src/service/main_service.py
|
JoeIOU/metedata_fusion_tools
|
3cf45338c4ae28e043142bf728ee6c91749ff72e
|
[
"Apache-2.0"
] | null | null | null |
src/service/main_service.py
|
JoeIOU/metedata_fusion_tools
|
3cf45338c4ae28e043142bf728ee6c91749ff72e
|
[
"Apache-2.0"
] | null | null | null |
from service.metadata_service import app
from service.view_service import app_view
from httpserver import httpserver
app1 = httpserver.getApp()
app1.register_blueprint(app)
app1.register_blueprint(app_view)
# @app1.route("/")
# def hello():
# return "Hello World!"
if __name__ == "__main__":
# app1.run()
httpserver.startWebServer()
| 20.529412
| 41
| 0.744986
|
36536a29c1c1f2eb140cdad90e60305cf624cd2b
| 1,496
|
py
|
Python
|
aftool/log.py
|
Asdil/aftool
|
cff4fd91dd7d0b7339b02a90824ffba40e7bdc63
|
[
"MIT"
] | null | null | null |
aftool/log.py
|
Asdil/aftool
|
cff4fd91dd7d0b7339b02a90824ffba40e7bdc63
|
[
"MIT"
] | null | null | null |
aftool/log.py
|
Asdil/aftool
|
cff4fd91dd7d0b7339b02a90824ffba40e7bdc63
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: log
Description :
Author : 艾登科技 Asdil
date: 2020/7/9
-------------------------------------------------
Change Activity:
2020/7/9:
-------------------------------------------------
"""
__author__ = 'Asdil'
import logging
def simple_init(level='INFO', log_path=None):
"""add方法用于新建一个log
Parameters
----------
level: str
日志级别
log_path: str
保存路径
Returns
----------
"""
logger = logging.getLogger()
level_dict = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR}
logger.setLevel(level_dict[level])
if log_path is not None:
# create a file handler
handler = logging.FileHandler(log_path)
handler.setLevel(level)
# create a logging format
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(filename)s:%(funcName)s:%(lineno)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
return logger
def init(log_path, level='INFO'):
"""init方法用于新建一个log, 但不能在spark上面跑
Parameters
----------
log_path : str
路径
level : str
日志等级
Returns
----------
"""
from loguru import logger
logger.add(log_path, rotation="1 MB", enqueue=True, level=level)
return logger
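# Hedged usage sketch (paths are illustrative, not part of the original module):
#   logger = simple_init(level='INFO', log_path='/tmp/app.log')
#   logger.info('recorded in /tmp/app.log')
#   loguru_logger = init('/tmp/app.loguru.log', level='DEBUG')  # loguru-based variant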
| 23.375
| 121
| 0.520722
|
87abc207fc5cdb172553d7ead1a6ca75b00d6e75
| 430
|
py
|
Python
|
plotly/validators/heatmap/_xsrc.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/heatmap/_xsrc.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/heatmap/_xsrc.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name='xsrc', parent_name='heatmap', **kwargs):
super(XsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 30.714286
| 76
| 0.64186
|
a58abe5065b4fa03fae006edd45fd53130667fc6
| 4,454
|
py
|
Python
|
datamart/unit_tests/test_utils.py
|
ShadoWander/datamart
|
35341f8fdb6c6b8232a1a84fa19ba1c765926ead
|
[
"MIT"
] | null | null | null |
datamart/unit_tests/test_utils.py
|
ShadoWander/datamart
|
35341f8fdb6c6b8232a1a84fa19ba1c765926ead
|
[
"MIT"
] | null | null | null |
datamart/unit_tests/test_utils.py
|
ShadoWander/datamart
|
35341f8fdb6c6b8232a1a84fa19ba1c765926ead
|
[
"MIT"
] | null | null | null |
from datamart.utilities.utils import Utils
import unittest, os, json
from datamart.materializers.materializer_base import MaterializerBase
from datamart.materializers.noaa_materializer import NoaaMaterializer
import pandas as pd
from pandas.util.testing import assert_frame_equal
class TestUtils(unittest.TestCase):
def setUp(self):
self.materializers_path = os.path.join(os.path.dirname(__file__), "../materializers")
self.resources_path = os.path.join(os.path.dirname(__file__), "resources")
self.dataframe_equal = assert_frame_equal
@Utils.test_print
def test_validate_schema(self):
with open(os.path.join(os.path.dirname(__file__), "resources/sample_schema.json"), "r") as f:
description = json.load(f)
self.assertEqual(Utils.validate_schema(description["description"]), True)
@Utils.test_print
def test_date_validate(self):
self.assertEqual(Utils.date_validate("2018-10-10"), "2018-10-10T00:00:00")
@Utils.test_print
def test_temporal_coverage_validate(self):
coverage = {}
self.assertEqual(Utils.temporal_coverage_validate(coverage), {"start": None, "end": None})
coverage = {"start": None}
self.assertEqual(Utils.temporal_coverage_validate(coverage), {"start": None, "end": None})
coverage = {"end": None}
self.assertEqual(Utils.temporal_coverage_validate(coverage), {"start": None, "end": None})
coverage = {"start": "2018-09-23T00:00:00", "end": "2018-10-10"}
self.assertEqual(Utils.temporal_coverage_validate(coverage),
{'end': '2018-10-10T00:00:00', 'start': '2018-09-23T00:00:00'})
coverage = {"start": "2018-00", "end": "2018-10-10"}
self.assertEqual(Utils.temporal_coverage_validate(coverage),
{'end': '2018-10-10T00:00:00', 'start': None})
@Utils.test_print
def test_load_materializer(self):
materializer = Utils.load_materializer("noaa_materializer")
self.assertEqual(issubclass(type(materializer), MaterializerBase), True)
self.assertIn(type(materializer).__name__, NoaaMaterializer.__name__)
@Utils.test_print
def test_materialize(self):
fake_metadata = {
"materialization": {
"python_path": "noaa_materializer",
"arguments": {
"type": 'PRCP'
}
}
}
fake_constrains = {
"date_range": {
"start": "2016-09-23",
"end": "2016-09-23"
},
"named_entity": {2: ["los angeles"]}
}
result = Utils.materialize(metadata=fake_metadata, constrains=fake_constrains).infer_objects()
        expected = pd.read_csv(os.path.join(os.path.dirname(__file__), "resources/noaa_result.csv"))
        self.dataframe_equal(result, expected)
@Utils.test_print
def test_generate_metadata_from_dataframe(self):
data = {
'Name': ['Tom', 'Jack', 'Steve', 'Ricky'],
'Age': [28, 34, 29, 42],
'Date': ["2018-10-05", "2014-02-23", "2020-09-23T00:10:00", "2023213"]
}
df = pd.DataFrame(data)
expected = {
'datamart_id': None,
'materialization': {
'python_path': 'default_materializer', 'arguments': None
},
'variables': [
{
'datamart_id': None,
'semantic_type': [],
'name': 'Age',
'description': 'column name: Age, dtype: int64'
},
{
'datamart_id': None,
'semantic_type': [],
'name': 'Date',
'description': 'column name: Date, dtype: object',
'temporal_coverage': {'start': '2014-02-23T00:00:00', 'end': '2020-09-23T00:10:00'}
},
{
'datamart_id': None,
'semantic_type': [],
'name': 'Name',
'description': 'column name: Name, dtype: object'
}
],
'title': 'Age Date Name',
'description': 'Age : int64, Date : object, Name : object',
'keywords': ['Age', 'Date', 'Name']
}
self.assertEqual(Utils.generate_metadata_from_dataframe(data=df), expected)
| 41.626168
| 103
| 0.566233
|
08e6e26ad989febc95727eeee229a743f27b70df
| 1,562
|
py
|
Python
|
api/tests/python/tests/substructure/tau_enumeration.py
|
00Green27/Indigo
|
dc09f838b8b05d4bba2f79d0af5617d637f0b70e
|
[
"Apache-2.0"
] | null | null | null |
api/tests/python/tests/substructure/tau_enumeration.py
|
00Green27/Indigo
|
dc09f838b8b05d4bba2f79d0af5617d637f0b70e
|
[
"Apache-2.0"
] | null | null | null |
api/tests/python/tests/substructure/tau_enumeration.py
|
00Green27/Indigo
|
dc09f838b8b05d4bba2f79d0af5617d637f0b70e
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
#indigo_inchi = IndigoInchi(indigo);
def testEnumTautomersForMolecule (molecule):
iter = indigo.iterateTautomers(molecule, 'INCHI')
lst = list()
for mol in iter:
prod = mol.clone()
lst.append(prod.canonicalSmiles())
lst.sort()
print(" " + "\n ".join(map(lambda x, y: str(x) + ") " + y, range(1, len(lst) + 1), lst)) + '\n')
def testEnumTautomersForSDF(sdf_file):
for idx, molecule in enumerate(indigo.iterateSDFile(sdf_file)):
try:
print("%d. %s" % (idx + 1, molecule.smiles()))
molecule.dearomatize()
testEnumTautomersForMolecule(molecule)
molecule.aromatize()
testEnumTautomersForMolecule(molecule)
except IndigoException as e:
print(getIndigoExceptionText(e))
print("This is the case when not all tautomers are found for the first time and the algorithm requires the second attempt:")
testEnumTautomersForMolecule (indigo.loadMolecule('OC1N=C2C(=NC(N)=NC(=O)2)NC(O)=1'));
print("Test tautomers1-small.sdf")
testEnumTautomersForSDF(joinPath('molecules', 'tautomers1-small.sdf'))
print("Test tautomers2-small.sdf")
testEnumTautomersForSDF(joinPath('molecules', 'tautomers2-small.sdf'))
print("Test tautomers1-large.sdf")
testEnumTautomersForSDF(joinPath('molecules', 'tautomers1-large.sdf.gz'))
print("Test tautomers2-large.sdf")
testEnumTautomersForSDF(joinPath('molecules', 'tautomers2-large.sdf.gz'))
| 35.5
| 125
| 0.710627
|
d86d0ea3bdc31f38a00e5291d9b33d0201249757
| 5,687
|
py
|
Python
|
poold/utils/history.py
|
geflaspohler/poold
|
4ddd085597b85e8c737b2f904f504437f728513b
|
[
"MIT"
] | 7
|
2021-08-02T15:31:01.000Z
|
2022-02-11T14:36:46.000Z
|
poold/utils/history.py
|
geflaspohler/poold
|
4ddd085597b85e8c737b2f904f504437f728513b
|
[
"MIT"
] | null | null | null |
poold/utils/history.py
|
geflaspohler/poold
|
4ddd085597b85e8c737b2f904f504437f728513b
|
[
"MIT"
] | 1
|
2021-07-15T23:05:02.000Z
|
2021-07-15T23:05:02.000Z
|
""" History object for book-keeping online learning progress.
For example:
import poold
models = ["model1", "model2"]
duration = 20
learner, history = poold.create("adahedged", model_list=models, T=duration)
"""
import copy
import numpy as np
import pdb
class History(object):
def __init__(self, models, default_play, low_memory=False):
""" Online leanring history object.
Args:
models (list): list of model names
T (int): algorithm duration
"""
self.models = models
self.d = len(models)
self.default_play = default_play
self.play_history = {} # history of past algorithm plays
self.loss_history = {} # history of past observed loss objects
        self.hint_history = {} # history of past hints
self.grad_history = {} # history of past observed gradients
self.param_history = {} # history of past parameters
self.os_history = {} # history of outstanding feedbacks
self.realized_losses = {} # history of past realized losses
self.low_memory = low_memory # if True, avoid saving full loss functions
def get_times(self):
""" Return history times """
return list(self.play_history.keys())
def record_play(self, t, w):
""" Record play at round t.
Args:
t: a time representation
w: a play representation
"""
self.play_history[t] = copy.copy(w)
def record_losses(self, losses_fb, verbose=False):
""" Record the received loss at time t.
Args:
losses_fb: list of (time, loss objects) tuples
"""
for t_fb, loss_fb in losses_fb:
# t_fb += self.learner_base_time
assert(t_fb in self.play_history)
if t_fb in self.grad_history:
if verbose:
print(f"Warning: time {t_fb} is already in gradient history and won't be recomputed.")
continue
if not self.low_memory:
self.loss_history[t_fb] = copy.deepcopy(loss_fb)
self.grad_history[t_fb] = loss_fb['grad'](w=self.play_history[t_fb])
self.realized_losses[t_fb] = loss_fb['fun'](w=self.play_history[t_fb])
def record_hint(self, t, hint):
""" Record the received hint at time t.
Args:
t: a time representation
hint (dict): hint dictionary
"""
# t += self.learner_base_time
self.hint_history[t] = copy.deepcopy(hint)
def record_params(self, t, params):
""" Record the received hint at time t.
Args:
t: a time representation
params (dict): parameter dictionary
"""
# t += self.learner_base_time
self.param_history[t] = copy.deepcopy(params)
def record_os(self, t, os):
""" Record the outstanding feedbacks at time t.
Args:
t: a time representation
os (list): list of oustanding feedback times
"""
# t += self.learner_base_time
self.os_history[t] = copy.deepcopy(os)
def get(self, t):
""" Get full history at time t """
g = self.get_grad(t)
w = self.get_play(t)
h = self.get_hint(t)
hp = self.get_hint(t-1)
params = self.get_params(t)
os = self.get_os(t)
D = len(os)
g_os = sum([self.get_grad(t_fb) for t_fb in os])
return {
"t": t, # time
"w": w, # play
"g": g, # gradient
"g_os": g_os, # outstanding gradient sum
"h": h, # hint
"hp": hp, # previous hint
"D": D, # delay length
"params": params # parameters
}
def get_loss(self, t):
""" Get the loss at time t """
# t += self.learner_base_time
assert(t in self.grad_history)
if self.low_memory:
return None, self.realized_losses[t], self.grad_history[t]
return self.loss_history[t], self.realized_losses[t], self.grad_history[t]
def get_grad(self, t):
""" Get the loss gradient at time t """
# t += self.learner_base_time
assert(t in self.grad_history)
return self.grad_history[t]
def get_hint(self, t):
""" Get the hint at time t """
# t += self.learner_base_time
if t not in self.hint_history:
return np.zeros((self.d,))
assert(t in self.hint_history)
return self.hint_history[t]
def get_params(self, t):
""" Get the parameters at time t """
# t += self.learner_base_time
assert(t in self.param_history)
return self.param_history[t]
def get_os(self, t):
""" Get the parameters at time t """
# t += self.learner_base_time
assert(t in self.os_history)
return self.os_history[t]
def get_play(self, t, return_past=True):
""" Get the play at time t. If return_past is True,
will return the play at t-1 if the play at time t
is not yet available.
"""
# Initial value before the first play
if len(self.play_history) == 0:
return copy.deepcopy(self.default_play)
# If past play is in history, return most recent play
if t not in self.play_history and return_past:
return self.get_last_play()
assert(t in self.play_history)
return self.play_history[t]
def get_last_play(self):
""" Get the most recent play """
t_max = max(list(self.play_history.keys()))
return self.play_history[t_max]
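if __name__ == "__main__":
    # Hedged, minimal self-check (not part of the library): the model names and the
    # simple linear loss below are illustrative only.
    hist = History(models=["model1", "model2"], default_play=np.ones(2) / 2)
    w0 = np.array([0.6, 0.4])
    g0 = np.array([1.0, -1.0])
    hist.record_play(0, w0)
    hist.record_hint(0, np.zeros(2))
    hist.record_params(0, {"lam": 1.0})
    hist.record_os(0, [0])
    hist.record_losses([(0, {"fun": lambda w: float(w @ g0), "grad": lambda w: g0})])
    print(hist.get(0))  # play, gradient, outstanding gradient sum, hint, params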
| 31.949438
| 106
| 0.578161
|
4d7281e2fec545accf1aed0bae1ed701dc2bb1d6
| 2,687
|
py
|
Python
|
webEntregas/public_html/modelos/ADI_Bin/utilitarios/FCKeditor/editor/filemanager/connectors/py/fckconnector.py
|
kalebecalixto/webEntregas
|
e207f20e4b8b388bf4220018f20c78c7d8980094
|
[
"Apache-2.0"
] | 3
|
2016-05-09T15:41:29.000Z
|
2017-04-28T18:03:12.000Z
|
public/javascripts/fckeditor/editor/filemanager/connectors/py/fckconnector.py
|
suratpyari/tog_vault
|
d8a5c9c1daa8a43b88365fc7d2cdc354cb4676ab
|
[
"MIT"
] | 1
|
2019-09-29T02:13:43.000Z
|
2019-09-29T02:13:43.000Z
|
public/javascripts/fckeditor/editor/filemanager/connectors/py/fckconnector.py
|
lee/typo_old
|
20d9bfd2aaf38eee4a34dc527ced082313c75c79
|
[
"MIT"
] | 1
|
2021-07-13T07:20:18.000Z
|
2021-07-13T07:20:18.000Z
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
"The base connector class. Subclass it to extend functionality (see Zope example)"
def __init__(self, environ=None):
"Constructor: Here you should parse request fields, initialize variables, etc."
self.request = FCKeditorRequest(environ) # Parse request
self.headers = [] # Clean Headers
if environ:
self.environ = environ
else:
self.environ = os.environ
# local functions
def setHeader(self, key, value):
self.headers.append ((key, value))
return
class FCKeditorRequest(object):
"A wrapper around the request object"
def __init__(self, environ):
if environ: # WSGI
self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
environ=environ,
keep_blank_values=1)
self.environ = environ
else: # plain old cgi
self.environ = os.environ
self.request = cgi.FieldStorage()
if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
if self.environ['REQUEST_METHOD'].upper()=='POST':
# we are in a POST, but GET query_string exists
				# cgi parses POST data by default, so parse the GET QUERY_STRING too
self.get_request = cgi.FieldStorage(fp=None,
environ={
'REQUEST_METHOD':'GET',
'QUERY_STRING':self.environ['QUERY_STRING'],
},
)
else:
self.get_request={}
def has_key(self, key):
return self.request.has_key(key) or self.get_request.has_key(key)
def get(self, key, default=None):
if key in self.request.keys():
field = self.request[key]
elif key in self.get_request.keys():
field = self.get_request[key]
else:
return default
if hasattr(field,"filename") and field.filename: #file upload, do not convert return value
return field
else:
return field.value
| 29.527473
| 93
| 0.689989
|
43e2682c19140bdcb0b17073d47e0057b4176b2c
| 1,043
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
Jordon-Chen/recipe-app-api
|
8112102a8c0fc1e4e36267e273d6816db2f5e628
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
Jordon-Chen/recipe-app-api
|
8112102a8c0fc1e4e36267e273d6816db2f5e628
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
Jordon-Chen/recipe-app-api
|
8112102a8c0fc1e4e36267e273d6816db2f5e628
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-04-27 01:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.965517
| 118
| 0.604027
|
4c6ec2bbbdd316b1fddba5923343ea90c9f27559
| 1,429
|
py
|
Python
|
gesund_projekt/pomodoros/migrations/0004_remove_pomodoro_pomodoro_pomodoro_author_and_more.py
|
asis2016/gesund-projekt
|
cb3828b69cd6a86deeab16943e38b6ebffd86abb
|
[
"MIT"
] | null | null | null |
gesund_projekt/pomodoros/migrations/0004_remove_pomodoro_pomodoro_pomodoro_author_and_more.py
|
asis2016/gesund-projekt
|
cb3828b69cd6a86deeab16943e38b6ebffd86abb
|
[
"MIT"
] | null | null | null |
gesund_projekt/pomodoros/migrations/0004_remove_pomodoro_pomodoro_pomodoro_author_and_more.py
|
asis2016/gesund-projekt
|
cb3828b69cd6a86deeab16943e38b6ebffd86abb
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.1 on 2022-03-31 06:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pomodoros', '0003_remove_pomodoro_author'),
]
operations = [
migrations.RemoveField(
model_name='pomodoro',
name='pomodoro',
),
migrations.AddField(
model_name='pomodoro',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='pomodoro',
name='break_minutes',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='pomodoro',
name='pomodoro_minutes',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='pomodoro',
name='remarks',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='pomodoro',
name='id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
]
| 30.404255
| 121
| 0.596921
|
aabe7606851b6d03e4aa25a532528f40ee60780e
| 1,042
|
py
|
Python
|
docs_src/custom_request_and_route/tutorial003.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 53,007
|
2018-12-08T10:05:29.000Z
|
2022-03-31T23:30:02.000Z
|
docs_src/custom_request_and_route/tutorial003.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 4,155
|
2019-01-05T05:07:49.000Z
|
2022-03-31T21:25:38.000Z
|
docs_src/custom_request_and_route/tutorial003.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 4,092
|
2018-12-09T16:21:00.000Z
|
2022-03-31T07:59:45.000Z
|
import time
from typing import Callable
from fastapi import APIRouter, FastAPI, Request, Response
from fastapi.routing import APIRoute
class TimedRoute(APIRoute):
def get_route_handler(self) -> Callable:
original_route_handler = super().get_route_handler()
async def custom_route_handler(request: Request) -> Response:
before = time.time()
response: Response = await original_route_handler(request)
duration = time.time() - before
response.headers["X-Response-Time"] = str(duration)
print(f"route duration: {duration}")
print(f"route response: {response}")
print(f"route response headers: {response.headers}")
return response
return custom_route_handler
app = FastAPI()
router = APIRouter(route_class=TimedRoute)
@app.get("/")
async def not_timed():
return {"message": "Not timed"}
@router.get("/timed")
async def timed():
return {"message": "It's the time of my life"}
app.include_router(router)
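# Hedged check (not part of the tutorial): the custom route class adds the timing header,
# which can be observed with the test client, e.g.
#   from fastapi.testclient import TestClient
#   assert "x-response-time" in TestClient(app).get("/timed").headers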
| 26.05
| 70
| 0.666027
|
05af0c7e7016d3751a19a770c29a79212a1e37dd
| 1,526
|
py
|
Python
|
tests/test_websocket_notifications.py
|
systemallica/django-websocket-notifications
|
eae304b021eb14d818d3a1fa5dd18bf791eb4197
|
[
"MIT"
] | null | null | null |
tests/test_websocket_notifications.py
|
systemallica/django-websocket-notifications
|
eae304b021eb14d818d3a1fa5dd18bf791eb4197
|
[
"MIT"
] | null | null | null |
tests/test_websocket_notifications.py
|
systemallica/django-websocket-notifications
|
eae304b021eb14d818d3a1fa5dd18bf791eb4197
|
[
"MIT"
] | null | null | null |
from test_plus.test import TestCase, APITestCase
from tests.factories import UserFactory
from websocket_notifications.models import NotificationGroup
class NotificationGroupAPITest(APITestCase):
user_factory = UserFactory
def setUp(self):
self.user = self.make_user()
def test_create_notification_group(self):
self.assertEqual(0, NotificationGroup.objects.filter(user=self.user).count())
with self.login(self.user):
self.post("api_v1:notificationgroup-list")
self.response_201()
data = self.last_response.json()
self.assertIn("code", data)
self.assertEqual(1, NotificationGroup.objects.filter(user=self.user).count())
def test_create_notification_group_already_existing(self):
NotificationGroup.objects.get_or_create_for_user(user=self.user)
self.assertEqual(1, NotificationGroup.objects.filter(user=self.user).count())
with self.login(self.user):
self.post("api_v1:notificationgroup-list")
self.response_201()
data = self.last_response.json()
self.assertIn("code", data)
self.assertEqual(1, NotificationGroup.objects.filter(user=self.user).count())
class ListenerTest(TestCase):
user_factory = UserFactory
def setUp(self):
self.user = self.make_user()
def test_get_listener_view(self):
with self.login(self.user):
self.get("websocket_notifications:listener")
self.response_200()
| 33.173913
| 85
| 0.707077
|
17b07ca5fd01dc55a95b198e0fc0257ecd4e00fb
| 5,374
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20191201/get_service_endpoint_policy_definition.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20191201/get_service_endpoint_policy_definition.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20191201/get_service_endpoint_policy_definition.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetServiceEndpointPolicyDefinitionResult',
'AwaitableGetServiceEndpointPolicyDefinitionResult',
'get_service_endpoint_policy_definition',
]
@pulumi.output_type
class GetServiceEndpointPolicyDefinitionResult:
"""
Service Endpoint policy definitions.
"""
def __init__(__self__, description=None, etag=None, name=None, provisioning_state=None, service=None, service_resources=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if service and not isinstance(service, str):
raise TypeError("Expected argument 'service' to be a str")
pulumi.set(__self__, "service", service)
if service_resources and not isinstance(service_resources, list):
raise TypeError("Expected argument 'service_resources' to be a list")
pulumi.set(__self__, "service_resources", service_resources)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the service endpoint policy definition resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def service(self) -> Optional[str]:
"""
Service endpoint name.
"""
return pulumi.get(self, "service")
@property
@pulumi.getter(name="serviceResources")
def service_resources(self) -> Optional[Sequence[str]]:
"""
A list of service resources.
"""
return pulumi.get(self, "service_resources")
class AwaitableGetServiceEndpointPolicyDefinitionResult(GetServiceEndpointPolicyDefinitionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceEndpointPolicyDefinitionResult(
description=self.description,
etag=self.etag,
name=self.name,
provisioning_state=self.provisioning_state,
service=self.service,
service_resources=self.service_resources)
def get_service_endpoint_policy_definition(resource_group_name: Optional[str] = None,
service_endpoint_policy_definition_name: Optional[str] = None,
service_endpoint_policy_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceEndpointPolicyDefinitionResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str service_endpoint_policy_definition_name: The name of the service endpoint policy definition name.
:param str service_endpoint_policy_name: The name of the service endpoint policy name.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceEndpointPolicyDefinitionName'] = service_endpoint_policy_definition_name
__args__['serviceEndpointPolicyName'] = service_endpoint_policy_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20191201:getServiceEndpointPolicyDefinition', __args__, opts=opts, typ=GetServiceEndpointPolicyDefinitionResult).value
return AwaitableGetServiceEndpointPolicyDefinitionResult(
description=__ret__.description,
etag=__ret__.etag,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
service=__ret__.service,
service_resources=__ret__.service_resources)
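# Hedged usage sketch (resource names are illustrative):
#   definition = get_service_endpoint_policy_definition(
#       resource_group_name="rg1",
#       service_endpoint_policy_name="policy1",
#       service_endpoint_policy_definition_name="definition1")
#   pulumi.export("service", definition.service)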
| 40.406015
| 178
| 0.681243
|
68ec182b91750adda3830ffbfbe77aac76a0636b
| 2,987
|
py
|
Python
|
app/models.py
|
DuncanArani/TALANTA
|
acad80af95bff9c3531816185d24ce9c7a7ae827
|
[
"MIT"
] | 1
|
2018-09-18T09:09:02.000Z
|
2018-09-18T09:09:02.000Z
|
app/models.py
|
DuncanArani/TALANTA
|
acad80af95bff9c3531816185d24ce9c7a7ae827
|
[
"MIT"
] | 3
|
2018-09-19T09:41:07.000Z
|
2018-09-19T15:10:45.000Z
|
app/models.py
|
koyoo-maxwel/TALANTA
|
8d6d3f47fbde3f6589382ce4be830345c51ac8cd
|
[
"MIT"
] | null | null | null |
from . import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255))
email = db.Column(db.String(255), unique=True, index=True)
phone_number = db.Column(db.String(20))
sex = db.Column(db.String)
pass_secure = db.Column(db.String(255))
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
vlogs = db.relationship('Talent', backref='user', lazy="dynamic")
comments = db.relationship('Comment', backref='user', lazy="dynamic")
def __repr__(self):
return f'User {self.username}'
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.pass_secure, password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
'''
Talent model. Defines our talents table and creates a relationship with the users table
so that we can query users' details.
'''
class Talent (db.Model):
__tablename__ = 'talents'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
talent_video_path = db.Column(db.String)
posted = db.Column(db.DateTime, index=True, default=datetime.utcnow)
description = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
category = db.Column(db.String)
comments = db.relationship('Comment', backref='talent', lazy="dynamic")
def save_talent(self):
db.session.add(self)
db.session.commit()
def delete_talent(self):
db.session.delete(self)
db.session.commit()
@classmethod
def fetch_videos(cls):
talents = Talent.query.all()
return talents
@classmethod
def search(cls, keywords):
results = Talent.query.filter_by(title=keywords).all()
return results
@classmethod
def fetch_by_category(cls, category):
talents = Talent.query.filter_by(category=category).all()
return talents
'''
Comment model. Defines our comments table and links it to the talents table.
'''
class Comment (db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.String)
talent_id = db.Column(db.Integer, db.ForeignKey("talents.id"))
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
def delete_comment(self):
db.session.delete(self)
db.session.commit()
| 27.40367
| 107
| 0.68229
|
f6866631aa2e81323777661e7ad9afd898eadda6
| 1,781
|
py
|
Python
|
pyoanda/tests/_test_integration/integration_test_case.py
|
toloco/pyoanda
|
26b3f28a89d07c5c20d2a645884505387f1daae8
|
[
"MIT"
] | 81
|
2015-03-18T23:02:33.000Z
|
2021-07-13T15:00:14.000Z
|
pyoanda/tests/_test_integration/integration_test_case.py
|
toloco/pyoanda
|
26b3f28a89d07c5c20d2a645884505387f1daae8
|
[
"MIT"
] | 32
|
2015-04-18T22:04:00.000Z
|
2019-02-28T00:58:39.000Z
|
pyoanda/tests/_test_integration/integration_test_case.py
|
toloco/pyoanda
|
26b3f28a89d07c5c20d2a645884505387f1daae8
|
[
"MIT"
] | 32
|
2015-04-06T16:42:07.000Z
|
2018-02-13T19:06:19.000Z
|
from datetime import datetime, timedelta
try:
import unittest2 as unittest
except ImportError:
import unittest
from pyoanda import SANDBOX
from pyoanda.client import Client
from pyoanda.order import Order
class IntegrationTestCase(unittest.TestCase):
    # Keep this as it will be shared between all test cases; avoid over-using it,
    # as this literally creates new users (I expect the users to be wiped out)
client = Client(SANDBOX)
user = client.create_account(currency="GBP")
client.account_id = user['accountId']
def build_order(self, immediate=False):
""" Build an order to be used with create_order.
Building an order is commonly required in the integration
tests, so this makes it easy.
Parameters
----------
immediate: bool
Whether to place an order that will be met immediately
or not; this is achieved by examining current prices and
bidding well below for non-immediate or by placing a
market order for immediate.
Returns an Order
"""
if immediate:
return Order(
instrument="GBP_USD",
units=1,
side="buy",
type="market"
)
expiry = datetime.utcnow() + timedelta(minutes=1)
prices = self.client.get_prices("GBP_USD", False)
price = prices['prices'][0]
at = round(price['bid'] * 0.9, 5)
# order must not be met straight away, otherwise we can't get it back
return Order(
instrument="GBP_USD",
units=1,
side="buy",
type="limit",
price=at,
expiry=expiry.isoformat()
)
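# --- Editor's illustrative sketch (not part of the original test helper) ----
# build_order() above derives the limit price from the current bid and rounds
# it to 5 decimal places so the order is not met straight away. A standalone
# demo of that computation on a hypothetical prices payload shaped like the
# response used above:
def _example_limit_price():
    prices = {'prices': [{'instrument': 'GBP_USD', 'bid': 1.25000, 'ask': 1.25020}]}
    price = prices['prices'][0]
    at = round(price['bid'] * 0.9, 5)   # bid well below the market
    return at                           # -> 1.125 for the sample bid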
| 30.706897
| 77
| 0.58731
|
9aa57af1c2bfdc06aee79c16628b45201552bcea
| 661
|
py
|
Python
|
tests/integrations/subprocess/test_json_parser.py
|
danyeaw/briefcase
|
fd9744e5b8dfc8a4c7606dc63cddfcda2dd00d78
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integrations/subprocess/test_json_parser.py
|
danyeaw/briefcase
|
fd9744e5b8dfc8a4c7606dc63cddfcda2dd00d78
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integrations/subprocess/test_json_parser.py
|
danyeaw/briefcase
|
fd9744e5b8dfc8a4c7606dc63cddfcda2dd00d78
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from briefcase.integrations.subprocess import ParseError, json_parser
@pytest.mark.parametrize(
"data, output",
[
('{"key": "value"}', {"key": "value"}),
(b'{"key": "value"}', {"key": "value"}),
]
)
def test_json_parser_success(data, output):
assert json_parser(data) == output
@pytest.mark.parametrize(
"data",
(
'This is a prologue in my JSON output :( \n\n{"key": "value"}',
b'This is a prologue in my JSON output :( \n\n{"key": "value"}'
)
)
def test_json_parser_fail(data):
with pytest.raises(ParseError, match="Failed to parse output as JSON:"):
json_parser(data)
| 24.481481
| 76
| 0.612708
|
f7e9b92aacf79ad90f89cb9a8d6aed1a830d2540
| 291
|
py
|
Python
|
oils/urls.py
|
ChristianJStarr/sbs-website
|
db891f0a67f46cc9cdadc95714304b2ea91a162a
|
[
"MIT"
] | 1
|
2022-01-09T18:54:32.000Z
|
2022-01-09T18:54:32.000Z
|
oils/urls.py
|
ChristianJStarr/sbs-website
|
db891f0a67f46cc9cdadc95714304b2ea91a162a
|
[
"MIT"
] | null | null | null |
oils/urls.py
|
ChristianJStarr/sbs-website
|
db891f0a67f46cc9cdadc95714304b2ea91a162a
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'oils'
urlpatterns = [
path('', views.all_patterns_views, name='patterns'),
path('<int:amount>/<int:offset>/', views.all_patterns_views, name='patterns'),
path('view/<id>/', views.single_pattern_views, name='view')
]
| 26.454545
| 82
| 0.690722
|
63e5026f952f8ea6be222fcca98eda84e91b67b2
| 2,683
|
py
|
Python
|
src/urllib3/__init__.py
|
cjerdonek/urllib3
|
728d9244665ef5b03103cb74d7b409ebe4f23b43
|
[
"MIT"
] | 553
|
2019-03-26T09:31:13.000Z
|
2022-03-31T05:25:22.000Z
|
src/urllib3/__init__.py
|
cjerdonek/urllib3
|
728d9244665ef5b03103cb74d7b409ebe4f23b43
|
[
"MIT"
] | 54
|
2015-12-14T12:51:51.000Z
|
2019-03-08T07:40:11.000Z
|
src/urllib3/__init__.py
|
cjerdonek/urllib3
|
728d9244665ef5b03103cb74d7b409ebe4f23b43
|
[
"MIT"
] | 56
|
2019-04-07T13:28:07.000Z
|
2022-03-25T15:59:07.000Z
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
__license__ = "MIT"
__version__ = "1.25.3"
__all__ = (
"HTTPConnectionPool",
"HTTPSConnectionPool",
"PoolManager",
"ProxyManager",
"HTTPResponse",
"Retry",
"Timeout",
"add_stderr_logger",
"connection_from_url",
"disable_warnings",
"encode_multipart_formdata",
"get_host",
"make_headers",
"proxy_from_url",
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug("Added a stderr logging handler to logger: %s", __name__)
return handler
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter("ignore", category)
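# --- Editor's illustrative sketch (not part of the original urllib3 module) --
# A minimal usage sketch of the two helpers defined above. Application code
# would normally call them as urllib3.add_stderr_logger() and
# urllib3.disable_warnings(); here they are referenced directly since they
# are defined in this module.
def _example_logging_setup():
    handler = add_stderr_logger(logging.DEBUG)            # verbose wire-level logs
    disable_warnings(exceptions.InsecureRequestWarning)   # e.g. silence unverified-HTTPS warnings
    return handler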
| 30.83908
| 88
| 0.758852
|
cc8dae771e621db4e71f760ad0deb4be7dd378b5
| 747
|
py
|
Python
|
script/get_kernel_descriptor.py
|
SJTU-IPADS/reef-artifacts
|
8750974f2d6655525a2cc317bf2471914fe68dab
|
[
"Apache-2.0"
] | 7
|
2022-03-23T07:04:20.000Z
|
2022-03-30T02:44:42.000Z
|
script/get_kernel_descriptor.py
|
SJTU-IPADS/reef-artifacts
|
8750974f2d6655525a2cc317bf2471914fe68dab
|
[
"Apache-2.0"
] | null | null | null |
script/get_kernel_descriptor.py
|
SJTU-IPADS/reef-artifacts
|
8750974f2d6655525a2cc317bf2471914fe68dab
|
[
"Apache-2.0"
] | null | null | null |
import sys
import json
f = open(sys.argv[1], "r")
lines = f.readlines()
descriptors = {}
is_descriptor = False
for line in lines:
if line.find(".amdhsa_kernel ") != -1:
is_descriptor = True
continue
if line.find(".end_amdhsa_kernel") != -1:
is_descriptor = False
continue
    if not is_descriptor:
continue
parts = line.strip().split(" ")
key = parts[0]
value = parts[1]
if key in descriptors:
values = descriptors[key]
values.append(value)
descriptors[key] = list(set(values))
else:
values = []
values.append(value)
descriptors[key] = values
print(json.dumps(descriptors, sort_keys=True, indent=4))
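# --- Editor's illustrative sketch (not part of the original script) ---------
# The loop above collects the key/value pairs that appear between an
# ".amdhsa_kernel <name>" line and the matching ".end_amdhsa_kernel" line in
# an assembly listing. A self-contained demo of the same logic on a small,
# hypothetical snippet (the directive names are illustrative):
def _demo_descriptor_parse():
    sample = [
        ".amdhsa_kernel my_kernel",
        "  .amdhsa_next_free_vgpr 8",
        "  .amdhsa_next_free_sgpr 16",
        ".end_amdhsa_kernel",
    ]
    collected = {}
    inside = False
    for raw in sample:
        if raw.find(".amdhsa_kernel ") != -1:
            inside = True
            continue
        if raw.find(".end_amdhsa_kernel") != -1:
            inside = False
            continue
        if inside:
            key, value = raw.strip().split(" ")[:2]
            collected.setdefault(key, []).append(value)
    return collected   # {'.amdhsa_next_free_vgpr': ['8'], '.amdhsa_next_free_sgpr': ['16']}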
| 19.657895
| 56
| 0.582329
|
f9e70e4315b0a7169fe091ed0c5cca33f3a289cb
| 1,758
|
py
|
Python
|
python/nrzs_encode.py
|
artemies/gr-kiss
|
2e0d74ac91976ce0bfdc9ffff2d84146f333ede9
|
[
"Unlicense"
] | null | null | null |
python/nrzs_encode.py
|
artemies/gr-kiss
|
2e0d74ac91976ce0bfdc9ffff2d84146f333ede9
|
[
"Unlicense"
] | null | null | null |
python/nrzs_encode.py
|
artemies/gr-kiss
|
2e0d74ac91976ce0bfdc9ffff2d84146f333ede9
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Daniel Estevez <daniel@destevez.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy
from gnuradio import gr
class nrzs_encode(gr.sync_block):
"""
    NRZ-S encoder block: the output state toggles for a 0 input bit and is
    held for a 1 input bit (out = state ^ x ^ 1).
"""
def __init__(self):
gr.sync_block.__init__(self,
name="nrzs_encode",
in_sig=[numpy.uint8],
out_sig=[numpy.uint8])
self.state = 1
def work(self, input_items, output_items):
in0 = input_items[0]
out = output_items[0]
self.state = 1
for i, x in enumerate(in0):
out[i] = self.state = self.state ^ x ^ 1
##self.last = x
return len(out)
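# --- Editor's illustrative sketch (not part of the original block) ----------
# work() above implements NRZ-S style encoding: the output level toggles when
# the input bit is 0 and is held when the input bit is 1, via the recurrence
# state = state ^ x ^ 1. A standalone demo of that recurrence on plain ints:
def _demo_nrzs(bits, initial_state=1):
    state = initial_state
    out = []
    for x in bits:
        state = state ^ x ^ 1
        out.append(state)
    return out
# _demo_nrzs([1, 1, 0, 0, 1, 0]) -> [1, 1, 0, 1, 1, 0]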
| 35.16
| 80
| 0.692264
|
18d42d30af43a68fb2d60dce41b80e66e85310a5
| 4,960
|
py
|
Python
|
juniper_official/Solutions/CGNAT/cgnat_multirow.py
|
vvikramb/healthbot-rules
|
72bdad144bebb512e9ac32d607b5924d96225334
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 43
|
2018-11-27T00:42:45.000Z
|
2022-02-24T01:19:39.000Z
|
juniper_official/Solutions/CGNAT/cgnat_multirow.py
|
vvikramb/healthbot-rules
|
72bdad144bebb512e9ac32d607b5924d96225334
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 266
|
2018-10-26T10:19:04.000Z
|
2022-03-16T04:38:29.000Z
|
juniper_official/Solutions/CGNAT/cgnat_multirow.py
|
vvikramb/healthbot-rules
|
72bdad144bebb512e9ac32d607b5924d96225334
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 99
|
2018-10-25T09:53:55.000Z
|
2021-12-07T09:51:59.000Z
|
#!/usr/bin/env python3
from __future__ import print_function, division
import requests, sys
from pprint import pprint as pp
from tand_udf import MultiRows
def get_ifl_stats(**kwargs):
print(kwargs, file=sys.stderr)
# Define and query influxdb
dg = (kwargs['device_group'])
did = (kwargs['device_id'])
print("DG", dg)
url = "http://influxdb:8086/query?db={0}:{1}&rp={0}:{1}".format(dg, did)
params = {
'q': 'select * from "service.cgnat/check-cgnat" where time > now() - 1800s group by * limit 1'
}
response = requests.get(url=url, params=params)
res = response.json()
#print("response :",res)
res_tags = []
res_fields = []
for i, value in enumerate(res['results'][0]['series']):
tags = ['', '', '', '']
# interface-name, internal-ip, pool-name, service-set-name
if "interface-name" in value['tags']:
tags[0] = value['tags']['interface-name']
if 'internal-ip' in value['tags']:
tags[1] = value['tags']['internal-ip']
if 'pool-name' in value['tags']:
tags[2] = value['tags']['pool-name']
if 'service-set-name' in value['tags']:
tags[3] = value['tags']['service-set-name']
res_tags.append(tags)
# session-count, ports-used, external-ip
fields = [0, 0, '']
for ind, field in enumerate(("session-count",
"ports-used", "external-ip")):
try:
field_index = value['columns'].index(field)
fields[ind] = value['values'][0][field_index]
except ValueError as exp:
print(exp)
res_fields.append(fields)
session_list = [row[0] for row in res_fields]
external_ip_list = [row[2] for row in res_fields]
internal_ip_list = [x[1] for x in res_tags]
avg_external_ip1 = avg_external_ip(external_ip_list)
max_session_count1 = max_session_count(session_list, internal_ip_list)
# max_session_count1 = max_session_count(session_list)
total_external_ip1 = total_external_ip(external_ip_list)
total_internal_ip1 = len(list(set([x[1] for x in res_tags])))
avg_session_count1 = avg_session_count(session_list, total_internal_ip1)
max_external_ip1 = max_external_ip(external_ip_list)
rows = MultiRows()
rows.add_row()
rows.add_field("avg_external_ip", avg_external_ip1)
rows.add_field("max_session_count", max_session_count1)
rows.add_field("total_external_ip", total_external_ip1)
rows.add_field("avg_session_count", avg_session_count1)
rows.add_field("total_internal_ip", total_internal_ip1)
rows.add_field("max_external_ip", max_external_ip1)
return rows
'''
This function returns the average number of records per unique external IP address (external IP utilization)
'''
def avg_external_ip(list_external_ip):
ori_len = len(list_external_ip)
list_external_ip = set(list_external_ip)
final_len = len(list_external_ip)
utilization = 0
if ori_len > 0:
utilization = ori_len/final_len
return utilization
'''
This function returns the average session count per IP address (total sessions divided by the supplied IP count)
'''
def avg_session_count(list_session_count, external_ip_count):
total_value = sum(list_session_count)
if external_ip_count > 0:
avg_value = total_value/external_ip_count
return round(avg_value,2)
return 0
'''
This function returns the highest number of times any single external IP address appears in the list
'''
def max_external_ip(list_external_ip):
a = [[x,list_external_ip.count(x)] for x in set(list_external_ip)]
max_value = 0
for i in a:
if i[1] > max_value:
max_value = i[1]
return max_value
'''
This function returns the maximum total session count across internal IP addresses
'''
#def max_session_count(list_session_count):
# max_value = 0
# if list_session_count:
# max_value = max(list_session_count)
# return max_value
def max_session_count(list_session_count, list_internal_ip):
max_value = 0
unique_session_count = dict()
for val1, val2 in zip(list_internal_ip, list_session_count):
if val1 in unique_session_count:
unique_session_count[val1] = unique_session_count[val1] + val2
else:
unique_session_count[val1] = val2
max_value = unique_session_count.get(max(unique_session_count, key=unique_session_count.get))
return max_value
# sum_session_list=[]
# already=[]
# if list_session_count:
# for x in list_internal_ip:
# c=0
# temp=[]
# for y in list_internal_ip:
# if((x==y) and (x not in already)):
# temp.append(list_session_count[c])
# c+=1
# sum_session_list.append(sum(temp))
# already.append(x)
# max_value = max(sum_session_list)
# return max_value
'''
This function returns the number of unique external IP addresses
'''
def total_external_ip(list_external_ip):
list_external_ip = set(list_external_ip)
final_len = len(list_external_ip)
return final_len
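# --- Editor's illustrative sketch (not part of the original rule) -----------
# max_session_count() above sums the session counts per internal IP and
# returns the largest total. A self-contained demo of that aggregation on
# toy (hypothetical) data:
def _demo_max_session_count():
    internal_ips = ['10.0.0.1', '10.0.0.2', '10.0.0.1']
    session_counts = [5, 7, 4]
    totals = {}
    for ip, count in zip(internal_ips, session_counts):
        totals[ip] = totals.get(ip, 0) + count
    return max(totals.values())   # -> 9 for 10.0.0.1 (5 + 4)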
| 34.929577
| 102
| 0.656048
|
c5e53fe2b14ab8febb26cacc3873e784a6c16fd6
| 166
|
py
|
Python
|
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/SelectCount.py
|
Yosoyfr/tytus
|
0df7e656835a93458462e476f7ab858a33baa2e0
|
[
"MIT"
] | null | null | null |
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/SelectCount.py
|
Yosoyfr/tytus
|
0df7e656835a93458462e476f7ab858a33baa2e0
|
[
"MIT"
] | null | null | null |
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/SelectCount.py
|
Yosoyfr/tytus
|
0df7e656835a93458462e476f7ab858a33baa2e0
|
[
"MIT"
] | 4
|
2020-12-19T17:12:13.000Z
|
2021-01-07T20:29:53.000Z
|
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
class SelectCount(Instruccion):
def __init__(self,idtabla):
self.idtabla = idtabla
| 27.666667
| 69
| 0.789157
|
57d9d5a7a27aa2cd1e078083bab5f906bc4d365f
| 2,111
|
py
|
Python
|
solidity/python/PerformanceUniTestCrossConnector.py
|
frankwei98/contracts
|
18fa00ec598058459d96950523c3fc23d2c00bd6
|
[
"Apache-2.0"
] | 13
|
2018-09-18T09:55:27.000Z
|
2021-01-07T02:35:08.000Z
|
solidity/python/PerformanceUniTestCrossConnector.py
|
frankwei98/contracts
|
18fa00ec598058459d96950523c3fc23d2c00bd6
|
[
"Apache-2.0"
] | 47
|
2019-02-13T06:25:37.000Z
|
2021-07-30T05:23:44.000Z
|
solidity/python/PerformanceUniTestCrossConnector.py
|
frankwei98/contracts
|
18fa00ec598058459d96950523c3fc23d2c00bd6
|
[
"Apache-2.0"
] | 12
|
2018-09-05T07:13:33.000Z
|
2019-05-28T09:53:06.000Z
|
import Web3Wrapper
import InputGenerator
MINIMUM_VALUE_BALANCE = 100
MAXIMUM_VALUE_BALANCE = 10 ** 34
SAMPLES_COUNT_BALANCE = 50
MINIMUM_VALUE_WEIGHT = 100000
MAXIMUM_VALUE_WEIGHT = 900000
SAMPLES_COUNT_WEIGHT = 10
MINIMUM_VALUE_AMOUNT = 1
MAXIMUM_VALUE_AMOUNT = 10 ** 34
SAMPLES_COUNT_AMOUNT = 50
def Main():
rangeBalance1 = InputGenerator.UniformDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, SAMPLES_COUNT_BALANCE)
rangeWeight1 = InputGenerator.UniformDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, SAMPLES_COUNT_WEIGHT)
rangeBalance2 = InputGenerator.UniformDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, SAMPLES_COUNT_BALANCE)
rangeWeight2 = InputGenerator.UniformDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, SAMPLES_COUNT_WEIGHT)
rangeAmount = InputGenerator.UniformDistribution(MINIMUM_VALUE_AMOUNT, MAXIMUM_VALUE_AMOUNT, SAMPLES_COUNT_AMOUNT)
testNum = 0
numOfTests = len(rangeBalance1) * len(rangeWeight1) * len(rangeBalance2) * len(rangeWeight2) * len(rangeAmount)
tester = Web3Wrapper.Contract('BancorFormula').tester()
minGas = float('+inf')
maxGas = float('-inf')
totalGas = 0
countGas = 0
for balance1 in rangeBalance1:
for weight1 in rangeWeight1:
for balance2 in rangeBalance2:
for weight2 in rangeWeight2:
for amount in rangeAmount:
testNum += 1
if True:
try:
gas = tester.calculateCrossConnectorReturn(balance1, weight1, balance2, weight2, amount)
minGas = min(minGas, gas)
maxGas = max(maxGas, gas)
totalGas += gas
countGas += 1
print('Test {} out of {}: gas = {}, minimum = {}, maximum = {}, average = {}'.format(testNum, numOfTests, gas, minGas, maxGas, totalGas // countGas))
except:
pass
Main()
| 39.830189
| 181
| 0.63856
|
19b8162782b34a8c06d00967e7b59d5f0c456294
| 5,195
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/peering/v20190801preview/get_peering_service_prefix.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/peering/v20190801preview/get_peering_service_prefix.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/peering/v20190801preview/get_peering_service_prefix.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetPeeringServicePrefixResult',
'AwaitableGetPeeringServicePrefixResult',
'get_peering_service_prefix',
]
@pulumi.output_type
class GetPeeringServicePrefixResult:
"""
The peering service prefix class.
"""
def __init__(__self__, id=None, learned_type=None, name=None, prefix=None, prefix_validation_state=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if learned_type and not isinstance(learned_type, str):
raise TypeError("Expected argument 'learned_type' to be a str")
pulumi.set(__self__, "learned_type", learned_type)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if prefix and not isinstance(prefix, str):
raise TypeError("Expected argument 'prefix' to be a str")
pulumi.set(__self__, "prefix", prefix)
if prefix_validation_state and not isinstance(prefix_validation_state, str):
raise TypeError("Expected argument 'prefix_validation_state' to be a str")
pulumi.set(__self__, "prefix_validation_state", prefix_validation_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="learnedType")
def learned_type(self) -> Optional[str]:
"""
The prefix learned type
"""
return pulumi.get(self, "learned_type")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
"""
Valid route prefix
"""
return pulumi.get(self, "prefix")
@property
@pulumi.getter(name="prefixValidationState")
def prefix_validation_state(self) -> Optional[str]:
"""
The prefix validation state
"""
return pulumi.get(self, "prefix_validation_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetPeeringServicePrefixResult(GetPeeringServicePrefixResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPeeringServicePrefixResult(
id=self.id,
learned_type=self.learned_type,
name=self.name,
prefix=self.prefix,
prefix_validation_state=self.prefix_validation_state,
provisioning_state=self.provisioning_state,
type=self.type)
def get_peering_service_prefix(peering_service_name: Optional[str] = None,
prefix_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPeeringServicePrefixResult:
"""
The peering service prefix class.
:param str peering_service_name: The peering service name.
:param str prefix_name: The prefix name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['peeringServiceName'] = peering_service_name
__args__['prefixName'] = prefix_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:peering/v20190801preview:getPeeringServicePrefix', __args__, opts=opts, typ=GetPeeringServicePrefixResult).value
return AwaitableGetPeeringServicePrefixResult(
id=__ret__.id,
learned_type=__ret__.learned_type,
name=__ret__.name,
prefix=__ret__.prefix,
prefix_validation_state=__ret__.prefix_validation_state,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
| 35.340136
| 163
| 0.653705
|
0a80d3e297967a547ec821610e2656df1758d14b
| 3,155
|
py
|
Python
|
recordwhat/records/sub.py
|
mrakitin/recordwhat
|
c68b8fca69836bdba0075726e829325f2c8918a8
|
[
"BSD-3-Clause"
] | 1
|
2016-06-08T15:14:15.000Z
|
2016-06-08T15:14:15.000Z
|
recordwhat/records/sub.py
|
mrakitin/recordwhat
|
c68b8fca69836bdba0075726e829325f2c8918a8
|
[
"BSD-3-Clause"
] | 12
|
2016-02-11T15:01:05.000Z
|
2019-09-23T17:28:32.000Z
|
recordwhat/records/sub.py
|
mrakitin/recordwhat
|
c68b8fca69836bdba0075726e829325f2c8918a8
|
[
"BSD-3-Clause"
] | 4
|
2016-06-08T15:03:07.000Z
|
2019-09-23T17:05:38.000Z
|
from ophyd import (EpicsSignal, EpicsSignalRO)
from .. import (RecordBase, _register_record_type,
FieldComponent as Cpt)
@_register_record_type('sub')
class SubRecord(RecordBase):
alarm_status = Cpt(EpicsSignalRO, '.STAT')
last_value_alarmed = Cpt(EpicsSignalRO, '.LALM')
last_value_archived = Cpt(EpicsSignalRO, '.ALST')
last_value_monitored = Cpt(EpicsSignalRO, '.MLST')
prev_value_of_a = Cpt(EpicsSignalRO, '.LA')
prev_value_of_b = Cpt(EpicsSignalRO, '.LB')
prev_value_of_c = Cpt(EpicsSignalRO, '.LC')
prev_value_of_d = Cpt(EpicsSignalRO, '.LD')
prev_value_of_e = Cpt(EpicsSignalRO, '.LE')
prev_value_of_f = Cpt(EpicsSignalRO, '.LF')
prev_value_of_g = Cpt(EpicsSignalRO, '.LG')
prev_value_of_h = Cpt(EpicsSignalRO, '.LH')
prev_value_of_i = Cpt(EpicsSignalRO, '.LI')
prev_value_of_j = Cpt(EpicsSignalRO, '.LJ')
prev_value_of_k = Cpt(EpicsSignalRO, '.LK')
prev_value_of_l = Cpt(EpicsSignalRO, '.LL')
value_of_input_a = Cpt(EpicsSignal, '.A')
value_of_input_b = Cpt(EpicsSignal, '.B')
value_of_input_c = Cpt(EpicsSignal, '.C')
value_of_input_d = Cpt(EpicsSignal, '.D')
value_of_input_e = Cpt(EpicsSignal, '.E')
value_of_input_f = Cpt(EpicsSignal, '.F')
value_of_input_g = Cpt(EpicsSignal, '.G')
value_of_input_h = Cpt(EpicsSignal, '.H')
value_of_input_i = Cpt(EpicsSignal, '.I')
value_of_input_j = Cpt(EpicsSignal, '.J')
value_of_input_k = Cpt(EpicsSignal, '.K')
value_of_input_l = Cpt(EpicsSignal, '.L')
# - alarms
alarm_deadband = Cpt(EpicsSignal, '.HYST')
high_alarm_limit = Cpt(EpicsSignal, '.HIGH')
high_severity = Cpt(EpicsSignal, '.HSV')
hihi_alarm_limit = Cpt(EpicsSignal, '.HIHI')
hihi_severity = Cpt(EpicsSignal, '.HHSV')
lolo_alarm_limit = Cpt(EpicsSignal, '.LOLO')
lolo_severity = Cpt(EpicsSignal, '.LLSV')
low_alarm_limit = Cpt(EpicsSignal, '.LOW')
low_severity = Cpt(EpicsSignal, '.LSV')
# - display
archive_deadband = Cpt(EpicsSignal, '.ADEL')
display_precision = Cpt(EpicsSignal, '.PREC')
high_operating_rng = Cpt(EpicsSignal, '.HOPR')
low_operating_range = Cpt(EpicsSignal, '.LOPR')
monitor_deadband = Cpt(EpicsSignal, '.MDEL')
units_name = Cpt(EpicsSignal, '.EGU$', string=True)
# - inputs
input_a = Cpt(EpicsSignal, '.INPA$', string=True)
input_b = Cpt(EpicsSignal, '.INPB$', string=True)
input_c = Cpt(EpicsSignal, '.INPC$', string=True)
input_d = Cpt(EpicsSignal, '.INPD$', string=True)
input_e = Cpt(EpicsSignal, '.INPE$', string=True)
input_f = Cpt(EpicsSignal, '.INPF$', string=True)
input_g = Cpt(EpicsSignal, '.INPG$', string=True)
input_h = Cpt(EpicsSignal, '.INPH$', string=True)
input_i = Cpt(EpicsSignal, '.INPI$', string=True)
input_j = Cpt(EpicsSignal, '.INPJ$', string=True)
input_k = Cpt(EpicsSignal, '.INPK$', string=True)
input_l = Cpt(EpicsSignal, '.INPL$', string=True)
# - sub
bad_return_severity = Cpt(EpicsSignal, '.BRSV')
init_routine_name = Cpt(EpicsSignalRO, '.INAM$', string=True)
subroutine_name = Cpt(EpicsSignal, '.SNAM$', string=True)
| 42.066667
| 65
| 0.67607
|
5ab2bf6d834de300b12416a1835f6f0ef63b57e2
| 2,808
|
py
|
Python
|
ftl/python/builder_test.py
|
JonathanRRogers/runtimes-common
|
6b69050bed4389763ddff8e1b9ec48f12ac32fc5
|
[
"Apache-2.0"
] | 95
|
2016-09-09T23:36:36.000Z
|
2022-03-05T20:06:00.000Z
|
ftl/python/builder_test.py
|
JonathanRRogers/runtimes-common
|
6b69050bed4389763ddff8e1b9ec48f12ac32fc5
|
[
"Apache-2.0"
] | 392
|
2016-09-13T15:15:57.000Z
|
2022-02-22T01:18:23.000Z
|
ftl/python/builder_test.py
|
JonathanRRogers/runtimes-common
|
6b69050bed4389763ddff8e1b9ec48f12ac32fc5
|
[
"Apache-2.0"
] | 73
|
2016-09-08T19:27:03.000Z
|
2021-07-08T13:28:18.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import mock
import json
from ftl.common import context
from ftl.common import constants
from ftl.common import ftl_util
from ftl.python import builder
from ftl.python import layer_builder
_REQUIREMENTS_TXT = """
Flask==0.12.0
"""
_APP = """
import os
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello from Python!"
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
"""
class PythonTest(unittest.TestCase):
@mock.patch('containerregistry.client.v2_2.docker_image.FromRegistry')
def setUp(self, mock_from):
mock_from.return_value.__enter__.return_value = None
self.ctx = context.Memory()
self.ctx.AddFile("app.py", _APP)
args = mock.Mock()
args.name = 'gcr.io/test/test:latest'
args.base = 'gcr.io/google-appengine/python:latest'
args.entrypoint = None
args.python_cmd = 'python2.7'
args.pip_cmd = 'pip'
args.virtualenv_cmd = 'virtualenv'
args.tar_base_image_path = None
self.builder = builder.Python(self.ctx, args)
        # constants.VIRTUALENV_DIR.replace('/', '') is used because the default path
        # will give permissions errors in some build environments (e.g. kokoro)
self.interpreter_builder = layer_builder.InterpreterLayerBuilder(
ftl_util.gen_tmp_dir(constants.VIRTUALENV_DIR.replace('/', '')),
self.builder._python_cmd, self.builder._virtualenv_cmd)
self.interpreter_builder._setup_virtualenv = mock.Mock()
self.builder._gen_package_lock_if_required = mock.Mock()
def test_build_interpreter_layer_ttl_written(self):
self.interpreter_builder.BuildLayer()
overrides = ftl_util.CfgDctToOverrides(
json.loads(self.interpreter_builder.GetImage().config_file()))
self.assertNotEqual(overrides.creation_time, "1970-01-01T00:00:00Z")
last_created = ftl_util.timestamp_to_time(overrides.creation_time)
now = datetime.datetime.now()
self.assertTrue(last_created > now - datetime.timedelta(days=2))
if __name__ == '__main__':
unittest.main()
| 33.428571
| 79
| 0.707621
|
f019dbcb74131fa21f7a2b53431b0d9dfb5d2a80
| 7,163
|
py
|
Python
|
kratos/python_scripts/KratosUnittest.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | null | null | null |
kratos/python_scripts/KratosUnittest.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | null | null | null |
kratos/python_scripts/KratosUnittest.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | null | null | null |
from __future__ import print_function, absolute_import, division
from KratosMultiphysics import Logger
from unittest import * # needed to make all functions available to the tests using this file
from unittest.util import safe_repr
from contextlib import contextmanager
import getopt
import sys
import os
class TestLoader(TestLoader):
def loadTestsFromTestCases(self, testCaseClasses):
        ''' Return a list of suites with all test cases contained in every
testCaseClass in testCaseClasses '''
allTests = []
for caseClasses in testCaseClasses:
caseTests = self.loadTestsFromTestCase(caseClasses)
allTests.append(caseTests)
return allTests
class TestCase(TestCase):
@classmethod
def setUpClass(cls):
if (sys.version_info < (3, 2)):
cls.assertRaisesRegex = cls.assertRaisesRegexp
def run(self, result=None):
super(TestCase,self).run(result)
def failUnlessEqualWithTolerance(self, first, second, tolerance, msg=None):
''' fails if first and second have a difference greater than
tolerance '''
if first < (second - tolerance) or first > (second + tolerance):
raise self.failureException(msg or '%r != %r within %r places' % (first, second, tolerance))
def assertIsClose(self, first, second, rel_tol=None, abs_tol=None, msg=None):
"""Fail if the two objects are unequal as determined by their
absolute and relative difference
If the two objects compare equal then they will automatically
compare relative almost equal.
"""
if first == second:
# shortcut
return
if rel_tol is None:
rel_tol = 1e-09
if abs_tol is None:
abs_tol = 0.0
if isclose(first, second, rel_tol, abs_tol):
return
standardMsg = '%s != %s within %s rel-tol and %s abs-tol' % (safe_repr(first),
safe_repr(second),
rel_tol, abs_tol)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
assertEqualTolerance = failUnlessEqualWithTolerance
@contextmanager
def SupressConsoleOutput():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
@contextmanager
def SupressConsoleError():
with open(os.devnull, "w") as devnull:
old_stderr = sys.stderr
sys.stderr = devnull
try:
yield
finally:
sys.stderr = old_stderr
@contextmanager
def SupressAllConsole():
with open(os.devnull, "w") as devnull:
old_stderr = sys.stderr
old_stdout = sys.stdout
sys.stderr = devnull
sys.stdout = devnull
try:
yield
finally:
sys.stderr = old_stderr
sys.stdout = old_stdout
def Usage():
''' Prints the usage of the script '''
lines = [
'Usage:',
'\t python kratos_run_tests [-l level] [-v verbosity]',
'Options',
'\t -h, --help: Shows this command',
'\t -l, --level: Minimum level of detail of the tests: \'all\'(Default) \'(nightly)\' \'(small)\' \'(validation)\'', # noqa
'\t -v, --verbose: Verbosity level: 0, 1 (Default), 2',
'\t --using-mpi: If running in MPI and executing the MPI-tests'
]
for l in lines:
Logger.PrintInfo(l) # using the logger to only print once in MPI
def main():
    # this deliberately overrides the function "unittest.main",
# because it cannot parse extra command line arguments
if "--using-mpi" in sys.argv:
        sys.argv.remove("--using-mpi") # has to be removed because unittest cannot parse it
import unittest
unittest.main()
def runTests(tests):
verbose_values = [0, 1, 2]
level_values = ['all', 'small', 'nightly', 'validation']
verbosity = 1
level = 'all'
is_mpi = False
# Parse Commandline
try:
opts, args = getopt.getopt(
sys.argv[1:],
'hv:l:', [
'help',
'verbose=',
'level=',
'using-mpi'
])
except getopt.GetoptError as err:
print(str(err))
Usage()
sys.exit(2)
for o, a in opts:
if o in ('-v', '--verbose'):
if int(a) in verbose_values:
verbosity = int(a)
else:
print('Error: {} is not a valid verbose level.'.format(a))
Usage()
sys.exit()
elif o in ('-h', '--help'):
Usage()
sys.exit()
elif o in ('-l', '--level'):
if a in level_values:
level = a
else:
print('Error: {} is not a valid level.'.format(a))
Usage()
sys.exit()
elif o in ('--using-mpi'):
is_mpi = True
else:
assert False, 'unhandled option'
if is_mpi:
level = "mpi_" + level
if tests[level].countTestCases() == 0:
print(
'[Warning]: "{}" test suite is empty'.format(level),
file=sys.stderr)
else:
result = not TextTestRunner(verbosity=verbosity, buffer=True).run(tests[level]).wasSuccessful()
sys.exit(result)
KratosSuites = {
'small': TestSuite(),
'nightly': TestSuite(),
'all': TestSuite(),
'validation': TestSuite(),
'mpi_small': TestSuite(),
'mpi_nightly': TestSuite(),
'mpi_all': TestSuite(),
'mpi_validation': TestSuite(),
}
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
'''Same implementation as math.isclose
    self-implemented because math.isclose was only introduced in Python 3.5
see https://www.python.org/dev/peps/pep-0485/
'''
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
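# --- Editor's illustrative sketch (not part of the original module) ---------
# isclose() above mirrors math.isclose: the allowed difference is the larger
# of a relative bound (rel_tol scaled by the bigger operand) and abs_tol, so
# abs_tol is what makes comparisons against zero possible.
def _isclose_examples():
    assert isclose(1000.0, 1000.0000001)           # within the default rel_tol=1e-09
    assert not isclose(0.0, 1e-10)                 # rel_tol alone cannot pass near zero
    assert isclose(0.0, 1e-10, abs_tol=1e-09)      # abs_tol handles the zero case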
class WorkFolderScope:
""" Helper-class to execute test in a specific target path
Input
-----
- rel_path_work_folder: String
Relative path of the target dir from the calling script
- file_path: String
Absolute path of the calling script
- add_to_path: Bool
"False" (default) if no need to add the target dir to the path, "True" otherwise.
"""
def __init__(self, rel_path_work_folder, file_path, add_to_path=False):
self.currentPath = os.getcwd()
self.add_to_path = add_to_path
if self.add_to_path:
self.currentPythonpath = sys.path
self.scope = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(file_path)), rel_path_work_folder))
def __enter__(self):
os.chdir(self.scope)
if self.add_to_path:
sys.path.append(self.scope)
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.currentPath)
if self.add_to_path:
sys.path = self.currentPythonpath
| 30.480851
| 132
| 0.577132
|
9a77b743492a5b0462b6a672622e363175742ea5
| 3,070
|
py
|
Python
|
projects/Deploying-a-Sentiment-Analysis-Model/serve/predict.py
|
gmendozah/deep-learning-with-pytorch
|
c46d68854722e029569654b1a3ef550871e7cfa3
|
[
"MIT"
] | null | null | null |
projects/Deploying-a-Sentiment-Analysis-Model/serve/predict.py
|
gmendozah/deep-learning-with-pytorch
|
c46d68854722e029569654b1a3ef550871e7cfa3
|
[
"MIT"
] | null | null | null |
projects/Deploying-a-Sentiment-Analysis-Model/serve/predict.py
|
gmendozah/deep-learning-with-pytorch
|
c46d68854722e029569654b1a3ef550871e7cfa3
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
    # Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == 'text/plain':
data = serialized_input_data.decode('utf-8')
return data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
return str(prediction_output)
def predict_fn(input_data, model):
print('Inferring sentiment of input data.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.word_dict is None:
raise Exception('Model has not been loaded properly, no word_dict.')
# TODO: Process input_data so that it is ready to be sent to our model.
# You should produce two variables:
# data_X - A sequence of length 500 which represents the converted review
# data_len - The length of the review
words = review_to_words(input_data)
data_X, data_len = convert_and_pad(model.word_dict, words)
# Using data_X and data_len we construct an appropriate input tensor. Remember
# that our model expects input data of the form 'len, review[500]'.
data_pack = np.hstack((data_len, data_X))
data_pack = data_pack.reshape(1, -1)
data = torch.from_numpy(data_pack)
data = data.to(device)
# Make sure to put the model into evaluation mode
model.eval()
# TODO: Compute the result of applying the model to the input data. The variable `result` should
# be a numpy array which contains a single integer which is either 1 or 0
result = round(float(model(data).detach().numpy()))
return result
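# --- Editor's illustrative sketch (not part of the original inference script) ---
# predict_fn() above packs the review length and the 500-element padded
# word-id sequence into a single row of shape (1, 501) before handing it to
# the LSTM. A standalone demo of that packing step with hypothetical word ids:
def _demo_pack_review():
    data_len = 3                                   # hypothetical review length
    data_X = np.zeros(500, dtype=np.int64)         # padded word-id sequence
    data_X[:3] = [12, 7, 45]                       # hypothetical word ids
    data_pack = np.hstack((data_len, data_X)).reshape(1, -1)
    return data_pack.shape                         # -> (1, 501)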
| 33.369565
| 107
| 0.702606
|
d7611a8db2284db3579879cecef39692063829a8
| 3,224
|
py
|
Python
|
script/polldb.py
|
sensfre/CabinetApproval
|
209b6ba9d34cbc47b4468be272d51956988dce30
|
[
"MIT"
] | null | null | null |
script/polldb.py
|
sensfre/CabinetApproval
|
209b6ba9d34cbc47b4468be272d51956988dce30
|
[
"MIT"
] | 6
|
2020-02-24T07:01:38.000Z
|
2020-06-13T23:30:07.000Z
|
script/polldb.py
|
sensfre/CabinetApproval
|
209b6ba9d34cbc47b4468be272d51956988dce30
|
[
"MIT"
] | null | null | null |
"""polldb.py
"""
import os
from datetime import datetime, timedelta
import numpy as np
from scipy.interpolate import interp1d
import fileio
def interp(xx, yy):
    # To make the fill_value returned outside the range take effect,
    # bounds_error has to be set explicitly.
return interp1d(xx, yy, fill_value=(yy[0], yy[-1]), bounds_error=False)
def t_max(pp):
return max([max(p.db['T']) for p in pp])
def t_min(pp):
return min([min(p.db['T']) for p in pp])
def load_sorted(fn):
""" 支持率/不支持率の読み込み。
Parameters
----------
fn, str : ファイル名(フルパス)
Returns
-------
buf, dict : 日時, 支持率, 不支持率 のリストの辞書
Notes
-----
各リストは日付順にソートしている
"""
data, names_ = fileio.load_core(fn)
names = [_ for _ in names_ if _[:4] != 'DATE']
    data = data[np.argsort(data['DATE1'])] # sort by date
t1 = [fileio.sn_fm_dt(_) for _ in data['DATE1']]
t2 = [fileio.sn_fm_dt(_) for _ in data['DATE2']]
buf = {k:data[k] for k in names}
buf['T'] = np.array([(a + b)/2 for (a, b) in zip(t1, t2)])
return buf
class DB:
""" 調査結果を保持する。
以下を提供する。
日付 .db['T'][0..n]
発表値 .db['APP_RATE'][0..n], .db['NAP_RATE'][0..n], ...
長期平均 .ma['APP_RATE'](t), .ma['NAP_RATE'](t), ...
有効期間とデータフォルダーを指定してから使う。
set_data_folder(), set_span() を参照。
グラフ属性(識別名(ラベル), マーカー, 色)も保持しているが、
もう一段上の階層に移した方が良さそう。
"""
    data_folder = None # data folder, e.g. '../data'
    t0 = None # analysis period start (datetime instance)
    tf = None # analysis period end
@classmethod
def set_data_folder(kls, data_folder):
        # set the data folder (shared by all instances)
#
kls.data_folder = data_folder
@classmethod
def set_span(kls, t0, tf):
        # set the analysis period (shared by all instances)
#
kls.t0 = t0
kls.tf = tf
def __init__(self, fn, label, marker, size, color='black'):
#
        self.fn = fn # file name
        self.label = label # graph label
        self.marker = marker # graph marker
        self.size = size # relative marker size
        self.color = color # graph color
self.db = self.load(fn)
        self.interp = {k:interp(self.db['T'], self.db[k]) for k in self.db} # linear interpolation (used for the animation)
def load(self, fn):
a = load_sorted(os.path.join(self.data_folder, fn)) # T Y N
ndx = (a['T'] >= fileio.sn_fm_dt(self.t0)) & (a['T'] <= fileio.sn_fm_dt(self.tf))
buf = {k: a[k][ndx] for k in a}
return buf
def calc_fact(db_list, k_app_nap, t_node, d_window):
""" 各社の感度を求める
Parameters
----------
t_node, 1d-array : 感度係数を求める日付のリスト
d_window, float [day] : 移動平均の窓幅 (過去 d_window 日に含まれる調査の平均を求める)
Note
----
ウィンドウ内の調査結果が 2 個未満の時は、感度の値を nan にする。
"""
def _mav(db, t):
p_window = max(0, db.db['T'][0] + d_window - t)
if 1:
p_window = d_window
ndx = (db.db['T'] >= t - d_window) & (db.db['T'] <= t + p_window)
y_ = db.db[k_app_nap][ndx]
if len(y_) >= 2:
ans = np.mean(y_)
else:
ans = np.NaN
return ans
yy = [[_mav(db, t) for t in t_node] for db in db_list]
ya = [np.nanmean(a) for a in zip(*yy)]
ff = [[a/b for a,b in zip(y, ya)] for y in yy]
return ff
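# --- Editor's illustrative sketch (not part of the original module) ---------
# interp() above wraps scipy's interp1d so that values requested before the
# first or after the last sample return the edge values instead of raising.
# A standalone demo on toy data (numpy and scipy are imported above):
def _demo_interp():
    xx = np.array([0.0, 1.0, 2.0])
    yy = np.array([10.0, 20.0, 40.0])
    f = interp(xx, yy)
    return float(f(-5.0)), float(f(0.5)), float(f(10.0))   # -> (10.0, 15.0, 40.0)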
| 26
| 89
| 0.531638
|
edb57071abf99966e94c8656b25342e67f2f5f6c
| 4,201
|
py
|
Python
|
src/make_windows.py
|
HLasse/wav2vec_finetune
|
084ab432ba4acbf5ce81267e2791fb36a0b70daa
|
[
"MIT"
] | 6
|
2021-09-10T08:18:07.000Z
|
2022-01-11T16:34:48.000Z
|
src/make_windows.py
|
HLasse/wav2vec_finetune
|
084ab432ba4acbf5ce81267e2791fb36a0b70daa
|
[
"MIT"
] | null | null | null |
src/make_windows.py
|
HLasse/wav2vec_finetune
|
084ab432ba4acbf5ce81267e2791fb36a0b70daa
|
[
"MIT"
] | 3
|
2021-09-10T09:46:17.000Z
|
2021-10-31T08:40:37.000Z
|
import numpy as np
import math
sig = np.arange(0, 20)
sampling_rate = 1
frame_length = 5
frame_stride = 2
zero_padding = True
def stack_frames(
sig,
sampling_rate,
frame_length,
frame_stride,
filter=lambda x: np.ones(
(x,
)),
zero_padding=True,
keep_short_signals=True,
remove_zero_padding=False):
"""Frame a signal into overlapping frames.
Args:
sig (array): The audio signal to frame of size (N,).
sampling_rate (int): The sampling frequency of the signal.
        frame_length (float): The length of the frame in seconds.
frame_stride (float): The stride between frames.
filter (array): The time-domain filter for applying to each frame.
By default it is one so nothing will be changed.
        zero_padding (bool): If the number of samples is not a multiple of
            the frame length (in samples), zero padding will
            be applied to generate the last frame.
        keep_short_signals: Return the original signal if it is shorter than frame_length.
        remove_zero_padding: Remove trailing zeros from the last frame following zero padding
Returns:
            array: Stacked frames, an array of size (number_of_frames x frame_sample_length).
"""
# Check dimension
s = "Signal dimention should be of the format of (N,) but it is %s instead"
assert sig.ndim == 1, s % str(sig.shape)
signal_length = len(sig) / sampling_rate
if signal_length < frame_length:
if keep_short_signals:
return np.expand_dims(sig, axis=0)
else:
raise ValueError(f"Signal is shorter than frame length {signal_length} vs {frame_length}. Set `keep_short_signal` to True to return the original signal in such cases.")
# Initial necessary values
length_signal = sig.shape[0]
frame_sample_length = int(
np.round(
sampling_rate *
frame_length)) # Defined by the number of samples
frame_stride = float(np.round(sampling_rate * frame_stride))
# Zero padding is done for allocating space for the last frame.
if zero_padding:
# Calculation of number of frames
numframes = (int(math.ceil((length_signal
- frame_sample_length) / frame_stride)))
# below zero pads the last, above discards the last signal
#numframes = (int(math.ceil((length_signal
# - (frame_sample_length - frame_stride)) / frame_stride)))
# Zero padding
len_sig = int(numframes * frame_stride + frame_sample_length)
additive_zeros = np.zeros((len_sig - length_signal,))
signal = np.concatenate((sig, additive_zeros))
else:
# No zero padding! The last frame which does not have enough
# samples(remaining samples <= frame_sample_length), will be dropped!
numframes = int(math.floor((length_signal
- frame_sample_length) / frame_stride))
# new length
len_sig = int((numframes - 1) * frame_stride + frame_sample_length)
signal = sig[0:len_sig]
# Getting the indices of all frames.
indices = np.tile(np.arange(0,
frame_sample_length),
(numframes,
1)) + np.tile(np.arange(0,
numframes * frame_stride,
frame_stride),
(frame_sample_length,
1)).T
indices = np.array(indices, dtype=np.int32)
# Extracting the frames based on the allocated indices.
frames = signal[indices]
    # Apply the window function
window = np.tile(filter(frame_sample_length), (numframes, 1))
Extracted_Frames = frames * window
    # doesn't work - can't change the shape of a single array
if remove_zero_padding:
Extracted_Frames[-1] = np.trim_zeros(Extracted_Frames[-1], trim="b")
return Extracted_Frames
l = stack_frames(sig, sampling_rate, frame_length, frame_stride, remove_zero_padding=False)
| 37.846847
| 180
| 0.615092
|
d5483f6c3b8735de73e2cf7a51c8f3b3f2da50c9
| 611
|
py
|
Python
|
Binary Search/878. Nth Magical Number.py
|
Into-Y0u/Github-Baby
|
5e4e6b02f49c2c99533289be9d49911006cad919
|
[
"MIT"
] | 2
|
2022-01-25T04:30:26.000Z
|
2022-01-25T10:36:15.000Z
|
Binary Search/878. Nth Magical Number.py
|
Into-Y0u/Leetcode-Baby
|
681ad4df01ee908f76d888aa4ccc10f04c03c56f
|
[
"MIT"
] | null | null | null |
Binary Search/878. Nth Magical Number.py
|
Into-Y0u/Leetcode-Baby
|
681ad4df01ee908f76d888aa4ccc10f04c03c56f
|
[
"MIT"
] | null | null | null |
class Solution:
def nthMagicalNumber(self, n: int, a: int, b: int) -> int:
def gcd(a,b):
if a==0 :
return b
return gcd(b%a,a)
def lcm(a,b):
return (a*b) // gcd(a,b)
left,right = 1 , n*min(a,b)
ans = 0
while left <= right :
mid = (left+right)//2
target = (mid//a) + (mid//b) - (mid//lcm(a,b))
if target < n :
left = mid+1
else :
right = mid -1
ans = mid
return ans%(10**9+ 7)
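# --- Editor's illustrative sketch (not part of the original solution) -------
# The binary search above counts how many numbers <= mid are divisible by a
# or b (inclusion-exclusion via lcm(a, b)) and narrows in on the n-th one.
def _demo_nth_magical():
    s = Solution()
    assert s.nthMagicalNumber(1, 2, 3) == 2   # magical numbers: 2, 3, 4, 6, ...
    assert s.nthMagicalNumber(4, 2, 3) == 6   # the 4th is 6
    return s.nthMagicalNumber(5, 2, 4)        # only multiples of 2: 2,4,6,8,10 -> 10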
| 26.565217
| 62
| 0.361702
|
bdf0f4975d721f61ae1228fddb8413d6f6161698
| 1,200
|
py
|
Python
|
airflow/operators/python_operator.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 5
|
2020-07-17T07:33:58.000Z
|
2022-03-02T06:23:47.000Z
|
airflow/operators/python_operator.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 7
|
2020-06-03T14:55:17.000Z
|
2021-12-30T00:01:50.000Z
|
airflow/operators/python_operator.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 12
|
2020-01-09T14:02:39.000Z
|
2022-01-24T07:18:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.operators.python`."""
import warnings
# pylint: disable=unused-import
from airflow.operators.python import ( # noqa
BranchPythonOperator, PythonOperator, PythonVirtualenvOperator, ShortCircuitOperator,
)
warnings.warn(
"This module is deprecated. Please use `airflow.operators.python`.",
DeprecationWarning, stacklevel=2
)
| 37.5
| 89
| 0.764167
|
f26a6115ab5ab6f17c27cc58d388d45f1bcd86b6
| 4,507
|
py
|
Python
|
src/python/zensols/deeplearn/batch/meta.py
|
plandes/deeplearn
|
925f02200c62a7dc798e474ed94a86e009fd1ebf
|
[
"MIT"
] | 2
|
2021-04-30T17:19:14.000Z
|
2021-05-04T03:48:59.000Z
|
src/python/zensols/deeplearn/batch/meta.py
|
plandes/deeplearn
|
925f02200c62a7dc798e474ed94a86e009fd1ebf
|
[
"MIT"
] | null | null | null |
src/python/zensols/deeplearn/batch/meta.py
|
plandes/deeplearn
|
925f02200c62a7dc798e474ed94a86e009fd1ebf
|
[
"MIT"
] | null | null | null |
"""Contains container classes for batch data.
"""
__author__ = 'Paul Landes'
from typing import Dict, Type
from dataclasses import dataclass
from dataclasses import field as dc_field
import sys
from io import TextIOBase
from zensols.config import Dictable
from zensols.persist import persisted, PersistableContainer
from zensols.deeplearn import NetworkSettings
from zensols.deeplearn.vectorize import (
FeatureVectorizerManagerSet,
FeatureVectorizerManager,
FeatureVectorizer,
)
from . import (
DataPoint,
Batch,
BatchStash,
BatchFeatureMapping,
ManagerFeatureMapping,
FieldFeatureMapping,
)
@dataclass
class BatchFieldMetadata(Dictable):
"""Data that describes a field mapping in a batch object.
"""
field: FieldFeatureMapping = dc_field()
"""The field mapping."""
vectorizer: FeatureVectorizer = dc_field(repr=False)
"""The vectorizer used to map the field."""
@property
def shape(self):
return self.vectorizer.shape
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(self.field.attr, depth, writer)
self._write_line('field:', depth + 1, writer)
self.field.write(depth + 2, writer)
self._write_line('vectorizer:', depth + 1, writer)
self.vectorizer.write(depth + 2, writer)
@dataclass
class BatchMetadata(Dictable):
"""Describes metadata about a :class:`.Batch` instance.
"""
data_point_class: Type[DataPoint] = dc_field()
"""The :class:`.DataPoint` class, which are created at encoding time."""
batch_class: Type[Batch] = dc_field()
"""The :class:`.Batch` class, which are created at encoding time."""
mapping: BatchFeatureMapping = dc_field()
"""The mapping used for encoding and decoding the batch."""
fields_by_attribute: Dict[str, BatchFieldMetadata] = dc_field(repr=False)
"""Mapping by field name to attribute."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'data point: {self.data_point_class}', depth, writer)
self._write_line(f'batch: {self.batch_class}', depth, writer)
self._write_line('mapping:', depth, writer)
self.mapping.write(depth + 1, writer)
self._write_line('attributes:', depth, writer)
for attr, field in self.fields_by_attribute.items():
field.write(depth + 1, writer)
@dataclass
class BatchMetadataFactory(PersistableContainer):
"""Creates instances of :class:`.BatchMetadata`.
"""
stash: BatchStash = dc_field()
"""The stash used to create the batches."""
@persisted('_metadata')
def __call__(self) -> BatchMetadata:
stash: BatchStash = self.stash
batch: Batch = stash.batch_type(None, None, None, None)
batch.batch_stash = stash
mapping: BatchFeatureMapping = batch._get_batch_feature_mappings()
batch.deallocate()
vec_mng_set: FeatureVectorizerManagerSet = stash.vectorizer_manager_set
attrib_keeps = stash.decoded_attributes
vec_mng_names = set(vec_mng_set.keys())
by_attrib = {}
mmng: ManagerFeatureMapping
for mmng in mapping.manager_mappings:
vec_mng_name: str = mmng.vectorizer_manager_name
if vec_mng_name in vec_mng_names:
vec_mng: FeatureVectorizerManager = vec_mng_set[vec_mng_name]
field: FieldFeatureMapping
for field in mmng.fields:
if field.attr in attrib_keeps:
vec = vec_mng[field.feature_id]
by_attrib[field.attr] = BatchFieldMetadata(field, vec)
return BatchMetadata(stash.data_point_type, stash.batch_type,
mapping, by_attrib)
@dataclass
class MetadataNetworkSettings(NetworkSettings):
"""A network settings container that has metadata about batches it recieves for
its model.
"""
batch_metadata_factory: BatchMetadataFactory = dc_field()
"""The factory that produces the metadata that describe the batch data
during the calls to :py:meth:`_forward`.
"""
@property
def batch_metadata(self) -> BatchMetadata:
"""Return the batch metadata used by this model.
"""
# it's not necessary to persist here since the call in the factory
# already does; also cleaned up by the metadata factory as it extends
# from PersistableContainer
return self.batch_metadata_factory()
| 33.887218
| 83
| 0.6785
|
4e4b4b3ec7cc6743f7806a3acaea16d10889c635
| 1,211
|
py
|
Python
|
localflavor/generic/countries/sepa.py
|
torakses/django-localflavor
|
17ca87095d6f8c3f3888016085a2edb5951889f4
|
[
"BSD-3-Clause"
] | 1
|
2022-01-31T11:12:03.000Z
|
2022-01-31T11:12:03.000Z
|
localflavor/generic/countries/sepa.py
|
torakses/django-localflavor
|
17ca87095d6f8c3f3888016085a2edb5951889f4
|
[
"BSD-3-Clause"
] | 2
|
2021-04-22T09:02:20.000Z
|
2021-09-20T09:23:45.000Z
|
localflavor/generic/countries/sepa.py
|
torakses/django-localflavor
|
17ca87095d6f8c3f3888016085a2edb5951889f4
|
[
"BSD-3-Clause"
] | 1
|
2021-02-05T09:42:52.000Z
|
2021-02-05T09:42:52.000Z
|
# -*- coding: utf-8 -*-
#: European Payments Council list of SEPA scheme countries as of 2 Sep 2015.
#: http://www.europeanpaymentscouncil.eu/index.cfm/knowledge-bank/epc-documents/epc-list-of-sepa-scheme-countries/
IBAN_SEPA_COUNTRIES = (
'AT', # Austria
'BE', # Belgium
'BG', # Bulgaria
'CH', # Switzerland
'CY', # Cyprus
'CZ', # Czech Republic
'DE', # Germany
'DK', # Denmark
'EE', # Estonia
'ES', # Spain + Canary Islands
'FI', # Finland + Åland Islands
'FR', # France + French Guiana, Guadeloupe, Martinique, Mayotte, Réunion, Saint Barthélemy,
# Saint Martin (French part), Saint Pierre and Miquelon
'GB', # United Kingdom
'GI', # Gibraltar
'GR', # Greece
'HR', # Croatia
'HU', # Hungary
'IE', # Ireland
'IS', # Iceland
'IT', # Italy
'LI', # Liechtenstein
'LT', # Lithuania
'LU', # Luxembourg
'LV', # Latvia
'MC', # Monaco
'MT', # Malta
'NL', # Netherlands
'NO', # Norway
'PL', # Poland
'PT', # Portugal + Azores + Madeira
'RO', # Romania
'SE', # Sweden
'SI', # Slovenia
'SK', # Slovakia
'SM', # San Marino
)
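# --- Editor's illustrative sketch (not part of the original module) ---------
# The tuple above is typically used as a plain membership test on the
# two-letter country prefix of an IBAN; the helper below is hypothetical and
# not part of django-localflavor.
def _is_sepa_iban(iban):
    return iban[:2].upper() in IBAN_SEPA_COUNTRIES
# _is_sepa_iban('DE89370400440532013000') -> True   (Germany is a SEPA country)
# _is_sepa_iban('BR1500000000000010932840814P2') -> False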
| 28.162791
| 114
| 0.548307
|
f0cf4d5700f5caf57b03dc914312e74354a176bf
| 34,744
|
py
|
Python
|
netharn/util/util_slider.py
|
angiemsu/netharn
|
728cb40aad299baf62c689430d07b29c67d8cf21
|
[
"Apache-2.0"
] | null | null | null |
netharn/util/util_slider.py
|
angiemsu/netharn
|
728cb40aad299baf62c689430d07b29c67d8cf21
|
[
"Apache-2.0"
] | null | null | null |
netharn/util/util_slider.py
|
angiemsu/netharn
|
728cb40aad299baf62c689430d07b29c67d8cf21
|
[
"Apache-2.0"
] | null | null | null |
import ubelt as ub # NOQA
import cv2
import numpy as np
import netharn as nh
import torch
import torch.utils.data as torch_data
import itertools as it
class SlidingWindow(ub.NiceRepr):
"""
Generates basis for "sliding window" slices to break a large image into
smaller pieces. Use it.product to slide across the coordinates.
Notes:
This is a simpler version of SlidingSlices
Args:
shape (ndarray): shape of source array to slide across.
window (tuple): shape of window
overlap (float): a number between 0 and 1 indicating the fraction of
overlap that parts will have. Must be `0 <= overlap < 1`.
keepbound (bool): if True, a non-uniform stride will be taken to ensure
that the right / bottom of the image is returned as a slice if
needed. Such a slice will not obey the overlap constraints.
(Defaults to False)
Attributes:
basis_shape - shape of the grid corresponding to the number of strides
the sliding window will take.
basis_slices - slices that will be taken in every dimension
Yields:
Tuple[slice]: slices used for numpy indexing
Example:
>>> shape = (220, 220)
>>> window = (10, 10)
>>> self = SlidingWindow(shape, window, stride=5)
>>> list(self)[41:45]
[(slice(0, 10, None), slice(205, 215, None)),
(slice(0, 10, None), slice(210, 220, None)),
(slice(5, 15, None), slice(0, 10, None)),
(slice(5, 15, None), slice(5, 15, None))]
>>> print('self.overlap = {!r}'.format(self.overlap))
self.overlap = [0.5, 0.5]
Example:
>>> shape = (4, 4)
>>> window = (3, 3)
>>> self = SlidingWindow(shape, window, stride=1)
>>> list(zip(self.centers, self.slices))
[((1.0, 1.0), (slice(0, 3, None), slice(0, 3, None))),
((1.0, 2.0), (slice(0, 3, None), slice(1, 4, None))),
((2.0, 1.0), (slice(1, 4, None), slice(0, 3, None))),
((2.0, 2.0), (slice(1, 4, None), slice(1, 4, None)))]
>>> shape = (3, 3)
>>> window = (2, 2)
>>> self = SlidingWindow(shape, window, stride=1)
>>> list(zip(self.centers, self.slices))
[((0.5, 0.5), (slice(0, 2, None), slice(0, 2, None))),
((0.5, 1.5), (slice(0, 2, None), slice(1, 3, None))),
((1.5, 0.5), (slice(1, 3, None), slice(0, 2, None))),
((1.5, 1.5), (slice(1, 3, None), slice(1, 3, None)))]
Example:
>>> shape = (16, 16)
>>> window = (4, 4)
>>> self = SlidingWindow(shape, window, overlap=(.5, .25))
>>> print('self.stride = {!r}'.format(self.stride))
self.stride = [2, 3]
>>> list(ub.chunks(self.grid, 5))
[[(0, 0), (0, 1), (0, 2), (0, 3), (0, 4)],
[(1, 0), (1, 1), (1, 2), (1, 3), (1, 4)],
[(2, 0), (2, 1), (2, 2), (2, 3), (2, 4)],
[(3, 0), (3, 1), (3, 2), (3, 3), (3, 4)],
[(4, 0), (4, 1), (4, 2), (4, 3), (4, 4)],
[(5, 0), (5, 1), (5, 2), (5, 3), (5, 4)],
[(6, 0), (6, 1), (6, 2), (6, 3), (6, 4)]]
"""
def __init__(self, shape, window, overlap=None, stride=None,
keepbound=False, allow_overshoot=False):
assert len(window) == len(shape), (
'incompatible dims: {} {}'.format(len(window),
len(shape)))
assert all(d <= D for d, D in zip(window, shape)), (
'window must be smaller than target')
stride, overlap = self._compute_stride(overlap, stride, shape,
window)
if not all(stride):
raise ValueError(
'Step must be positive everywhere. Got={}'.format(stride))
stide_kw = [dict(margin=d, stop=D, step=s, keepbound=keepbound,
check=not keepbound and not allow_overshoot)
for d, D, s in zip(window, shape, stride)]
undershot_shape = []
overshoots = []
for kw in stide_kw:
final_pos = (kw['stop'] - kw['margin'])
n_steps = final_pos // kw['step']
overshoot = final_pos % kw['step']
undershot_shape.append(n_steps + 1)
overshoots.append(overshoot)
if not allow_overshoot and any(overshoots):
raise ValueError('overshoot={} stide_kw={}'.format(overshoots,
stide_kw))
# make a slice generator for each dimension
self.stride = stride
self.overlap = overlap
self.window = window
# The undershot basis shape, only contains indices that correspond
# perfectly to the input. It may crop a bit of the ends. If this is
# equal to basis_shape, then the self perfectly fits the input.
self.undershot_shape = undershot_shape
# NOTE: if we have overshot, then basis shape will not perfectly
# align to the original image. This shape will be a bit bigger.
self.basis_slices = [list(nh.util.wide_strides_1d(**kw))
for kw in stide_kw]
self.basis_shape = [len(b) for b in self.basis_slices]
self.n_total = np.prod(self.basis_shape)
def __nice__(self):
return '{}, stride={}'.format(self.basis_shape, self.stride)
def _compute_stride(self, overlap, stride, shape, window):
"""
        Ensures that stride has the correct shape. If stride is not provided,
        compute stride from the desired overlap.
"""
if not (overlap is None) ^ (stride is None):
raise ValueError('specify overlap({}) XOR stride ({})'.format(
overlap, stride))
if stride is None:
if not isinstance(overlap, (list, tuple)):
overlap = [overlap] * len(window)
if any(frac < 0 or frac >= 1 for frac in overlap):
raise ValueError((
'part overlap was {}, but fractional overlaps must be '
'in the range [0, 1)').format(overlap))
stride = [int(round(d - d * frac))
for frac, d in zip(overlap, window)]
else:
if not isinstance(stride, (list, tuple)):
stride = [stride] * len(window)
# Recompute fractional overlap after integer stride is computed
overlap = [(d - s) / d for s, d in zip(stride, window)]
assert len(stride) == len(shape), 'incompatible dims'
return stride, overlap
def __len__(self):
return self.n_total
def _iter_basis_frac(self):
for slices in self._iter_slices():
frac = [sl.start / D for sl, D in zip(slices, self.source.shape)]
yield frac
def _iter_basis_idxs(self):
basis_indices = map(range, self.basis_shape)
for basis_idxs in it.product(*basis_indices):
yield basis_idxs
def _iter_slices(self):
for slices in it.product(*self.basis_slices):
yield slices
def __iter__(self):
yield from self._iter_slices()
@property
def grid(self):
return self._iter_basis_idxs()
@property
def slices(self):
return self._iter_slices()
@property
def centers(self):
for slices in self._iter_slices():
center = tuple(sl_.start + (sl_.stop - sl_.start - 1) / 2 for sl_ in slices)
yield center
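# Hedged example (not part of the original module): a minimal sketch showing that each
# item yielded by SlidingWindow is a tuple of slices that indexes the source array
# directly. The array contents and window size here are illustrative assumptions.
def _demo_sliding_window():  # pragma: no cover
    arr = np.arange(36).reshape(6, 6)
    win = SlidingWindow(arr.shape, window=(3, 3), stride=3)
    # Non-overlapping 3x3 chips cut out of the 6x6 array.
    chips = [arr[sl] for sl in win]
    assert len(chips) == win.n_total
    return chips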
class SlidingSlices(ub.NiceRepr):
"""
Generates basis for "sliding window" slices to break a large image into
smaller pieces. Use it.product to slide across the coordinates.
Args:
source (ndarray): array to slice across. It is typically best to ensure
this is in CHW or CDHW format for maximum compatibility.
target_shape (tuple): (chan, depth, height, width) of the window
(be sure to include channels). CHW or CDHW format.
overlap (float): a number between 0 and 1 indicating the fraction of
overlap that parts will have. Must be `0 <= overlap < 1`.
keepbound (bool): if True, a non-uniform step will be taken to ensure
that the right / bottom of the image is returned as a slice if
needed. Such a slice will not obey the overlap constraints.
(Defaults to False)
Attributes:
basis_shape - shape of the grid corresponding to the number of steps
the sliding window will take.
basis_slices - slices that will be taken in every dimension
Yields:
tuple(slice, slice): row and column slices used for numpy indexing
Example:
>>> source = np.zeros((220, 220))
>>> target_shape = (10, 10)
>>> slider = SlidingSlices(source, target_shape, step=5)
>>> list(slider.slices)[41:45]
[(slice(0, 10, None), slice(205, 215, None)),
(slice(0, 10, None), slice(210, 220, None)),
(slice(5, 15, None), slice(0, 10, None)),
(slice(5, 15, None), slice(5, 15, None))]
>>> print('slider.overlap = {!r}'.format(slider.overlap))
slider.overlap = [0.5, 0.5]
Example:
>>> source = np.zeros((250, 200, 200))
>>> target_shape = (10, 10, 10)
>>> slider = SlidingSlices(source, target_shape, step=(1, 2, 2))
>>> chip = next(slider.chips)
>>> print('chip.shape = {!r}'.format(chip.shape))
chip.shape = (10, 10, 10)
Example:
>>> source = np.zeros((16, 16))
>>> target_shape = (4, 4)
>>> slider = SlidingSlices(source, target_shape, overlap=(.5, .25))
>>> print('slider.step = {!r}'.format(slider.step))
slider.step = [2, 3]
>>> list(ub.chunks(slider.grid, 5))
[[(0, 0), (0, 1), (0, 2), (0, 3), (0, 4)],
[(1, 0), (1, 1), (1, 2), (1, 3), (1, 4)],
[(2, 0), (2, 1), (2, 2), (2, 3), (2, 4)],
[(3, 0), (3, 1), (3, 2), (3, 3), (3, 4)],
[(4, 0), (4, 1), (4, 2), (4, 3), (4, 4)],
[(5, 0), (5, 1), (5, 2), (5, 3), (5, 4)],
[(6, 0), (6, 1), (6, 2), (6, 3), (6, 4)]]
"""
def __init__(slider, source, target_shape, overlap=None, step=None,
keepbound=False, allow_overshoot=False):
img_shape = source.shape
assert len(target_shape) == len(img_shape), (
'incompatible dims: {} {}'.format(len(target_shape),
len(img_shape)))
assert all(d <= D for d, D in zip(target_shape, img_shape)), (
'window must be smaller than target')
step, overlap = slider._compute_step(overlap, step, img_shape,
target_shape)
if not all(step):
raise ValueError(
'Step must be positive everywhere. Got={}'.format(step))
stide_kw = [dict(margin=d, stop=D, step=s, keepbound=keepbound,
check=not keepbound and not allow_overshoot)
for d, D, s in zip(target_shape, img_shape, step)]
undershot_shape = []
overshoots = []
for kw in stide_kw:
final_pos = (kw['stop'] - kw['margin'])
n_steps = final_pos // kw['step']
overshoot = final_pos % kw['step']
undershot_shape.append(n_steps + 1)
overshoots.append(overshoot)
if not allow_overshoot and any(overshoots):
raise ValueError('overshoot={} stide_kw={}'.format(overshoots,
stide_kw))
# make a slice generator for each dimension
slider.step = step
slider.overlap = overlap
slider.source = source
slider.window = target_shape
# The undershot basis shape, only contains indices that correspond
# perfectly to the input. It may crop a bit of the ends. If this is
# equal to basis_shape, then the slider perfectly fits the input.
slider.undershot_shape = undershot_shape
# NOTE: if we have overshot, then basis shape will not perfectly
# align to the original image. This shape will be a bit bigger.
slider.basis_slices = [list(nh.util.wide_strides_1d(**kw))
for kw in stide_kw]
slider.basis_shape = [len(b) for b in slider.basis_slices]
slider.n_total = np.prod(slider.basis_shape)
def __nice__(slider):
return '{}, step={}'.format(slider.basis_shape, slider.step)
def _compute_step(slider, overlap, step, img_shape, target_shape):
"""
        Ensures that step has the correct shape. If step is not provided,
        compute step from the desired overlap.
"""
if not (overlap is None) ^ (step is None):
raise ValueError('specify overlap({}) XOR step ({})'.format(
overlap, step))
if step is None:
if not isinstance(overlap, (list, tuple)):
overlap = [overlap] * len(target_shape)
if any(frac < 0 or frac >= 1 for frac in overlap):
raise ValueError((
'part overlap was {}, but fractional overlaps must be '
'in the range [0, 1)').format(overlap))
step = [int(round(d - d * frac))
for frac, d in zip(overlap, target_shape)]
else:
if not isinstance(step, (list, tuple)):
step = [step] * len(target_shape)
# Recompute fractional overlap after integer step is computed
overlap = [(d - s) / d for s, d in zip(step, target_shape)]
assert len(step) == len(img_shape), 'incompatible dims'
return step, overlap
def __len__(slider):
return slider.n_total
def _iter_basis_frac(slider):
for slices in slider._iter_slices():
frac = [sl.start / D for sl, D in zip(slices, slider.source.shape)]
yield frac
def _iter_basis_idxs(slider):
basis_indices = map(range, slider.basis_shape)
for basis_idxs in it.product(*basis_indices):
yield basis_idxs
def _iter_slices(slider):
for slices in it.product(*slider.basis_slices):
yield slices
def _iter_chips(slider):
for slices in slider._iter_slices():
chip = slider.source[slices]
yield chip
def __iter__(slider):
yield from zip(slider.slices, slider.chips)
@property
def grid(self):
return self._iter_basis_idxs()
@property
def slices(self):
return self._iter_slices()
@property
def chips(self):
return self._iter_chips()
def to_dataset(self):
slider_dset = SlidingIndexDataset(self)
return slider_dset
def clf_upscale_transform(slider, dims=(-2, -1)):
"""
Find transformation to upscale a single scalar classification for each
window back to the spatial resolution of the original data.
FIXME:
This contains bugs that will cause slight alignment errors.
NOTE:
returned scales are not correct
* This does work when the window size is 1x1
* This does work when the step size is 1
Args:
dims (tuple): indices of the spatial (height and width) dimensions
Example:
>>> source = np.zeros((3, 25, 25))
>>> window = (3, 5, 5)
>>> step = 2
>>> slider = SlidingSlices(source, window, step=step)
>>> dims = (-2, -1)
>>> # Make dummy predicted data
>>> pred_shape = list(ub.take(slider.basis_shape, dims))
>>> pred = np.arange(slider.n_total).reshape(pred_shape)
>>> # upscale using computed transforms
>>> (yscale, xscale), padding, prepad_shape = slider.clf_upscale_transform(dims)
>>> cv2.resize(pred.astype(np.uint8), prepad_shape)[0].shape
>>> resized = cv2.resize(pred.astype(np.uint8), prepad_shape)
>>> resized = np.pad(resized, padding, mode='constant')
>>> # FIXME: Following scale doesnt work right
>>> nh.util.imscale(pred.astype(np.uint8), (xscale, yscale))[0].shape
"""
def slcenter(sl):
""" center of the window defined by a slice """
return sl.start + (sl.stop - sl.start - 1) / 2
def slstep(slices):
return slices[1].start - slices[0].start
ydim, xdim = dims
# Get the height / width of the original data we want to resize to
orig_h = slider.source.shape[ydim]
orig_w = slider.source.shape[xdim]
# Find the windows corresponding to x and y spatial dimensions
yslices = slider.basis_slices[ydim]
xslices = slider.basis_slices[xdim]
# The step size between points corresponds to an integer scale factor?
# FIXME: This is wrong. Should scale be a function of step and window?
yscale = slstep(yslices)
xscale = slstep(xslices)
# Find padding to account for sliding window boundary effects
# FIXME: is this really how big the padding should be?
top = int(np.floor(slcenter(yslices[0])))
left = int(np.floor(slcenter(xslices[0])))
bot = yslices[-1].stop - int(np.floor(slcenter(yslices[-1]))) - 1
right = xslices[-1].stop - int(np.floor(slcenter(xslices[-1]))) - 1
padding = ((top, bot), (left, right))
# Find the shape we will upscale to before padding
        # upscale + padding should result in the original shape
# for some reason my initial thought on how to calculate this indirectly failed
prepad_h = orig_h - left - right
prepad_w = orig_w - top - bot
prepad_shape = (prepad_h, prepad_w)
pred_h, pred_w = list(ub.take(slider.basis_shape, dims))
# prepad_h / pred_h
# prepad_w / pred_w
# Note:
# when we do this correctly, it is possible padding may be negative
# if the stride is less than the window size. This is because scale
# should simply be scaling to the point where the extend of the
# predicted pixels touches each other but does not overlap.
# This would mean:
# * translating by half the window width + .5 (so odd kernels are
# aligned with center pixels, and even kernels are aligned between
# boundaries)
        #     * scaling by half the stride to make the extent of each pixel touch
# * padding by half the window size minus half the stride. Or clipping
# by that amount if it is negative
return (yscale, xscale), padding, prepad_shape
def upscale_overlay(slider, pred, dims=(-2, -1)):
"""
Upscales a prediction computed at each point in the sliding window to
        overlay on top of the original spatial resolution (albeit coarsely).
TODO:
handle the case where overshoots happen, should there be an extra
translation to account for them? Or does this scheme already take
that into account?
It does not because the steps might be nonlinear when keepbound=True,
but when it is False the steps are linear and this does handle it.
Example:
>>> source = np.zeros((3, 11, 11))
>>> window = (3, 5, 5)
>>> step = 6
>>> slider = SlidingSlices(source, window, step=step)
>>> dims = (-2, -1)
>>> # Make dummy predicted data
>>> pred_shape = list(ub.take(slider.basis_shape, dims))
        >>> pred = np.arange(1, slider.n_total + 1).reshape(pred_shape).astype(float)
>>> # upscale using computed transforms
>>> upscaled = slider.upscale_overlay(pred)
Example:
>>> source = np.zeros((3, 20, 20))
>>> window = (3, 3, 3)
>>> step = 6
>>> slider = SlidingSlices(source, window, step=step, allow_overshoot=True)
>>> dims = (-2, -1)
>>> # Make dummy predicted data
>>> pred_shape = list(ub.take(slider.basis_shape, dims))
        >>> pred = np.arange(1, slider.n_total + 1).reshape(pred_shape).astype(float)
>>> # upscale using computed transforms
>>> upscaled = slider.upscale_overlay(pred)
"""
# We can model this with a simple affine transform. First allocate the
# required output size, then construct the transform. Padding and
# cropping will occur naturally.
ydim, xdim = dims
# Get the height / width of the original data we want to resize to
orig_h = slider.source.shape[ydim]
orig_w = slider.source.shape[xdim]
# First scale, then translate
sy = slider.step[ydim]
sx = slider.step[xdim]
ty = slider.window[ydim] / 2 - .5
tx = slider.window[xdim] / 2 - .5
aff = np.array([
[sx, 0, tx],
[ 0, sy, ty],
])
dsize = (orig_w, orig_h)
if pred.dtype.kind == 'i':
upscaled = cv2.warpAffine(pred, aff, dsize, flags=cv2.INTER_NEAREST)
else:
upscaled = cv2.warpAffine(pred, aff, dsize, flags=cv2.INTER_LINEAR)
return upscaled
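# Hedged example (not part of the original module): a minimal sketch of iterating a
# SlidingSlices instance, which yields (slices, chip) pairs over the source volume.
# The shapes below are illustrative assumptions.
def _demo_sliding_slices():  # pragma: no cover
    volume = np.random.rand(4, 8, 8).astype(np.float32)
    slider = SlidingSlices(volume, target_shape=(2, 4, 4), step=(2, 4, 4))
    for sl, chip in slider:
        # every chip is a window-shaped view into the source volume
        assert chip.shape == (2, 4, 4)
    return slider.basis_shape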
class SlidingIndexDataset(torch_data.Dataset):
"""
Faster loading of slices at cost of memory
slider_dset = SlidingIndexDataset(slider)
slider_loader = torch_data.DataLoader(slider_dset, shuffle=False, batch_size=128)
slider_iter = iter(slider_loader)
batch = next(slider_iter)
"""
def __init__(slider_dset, slider):
slider_dset.slider = slider
def __len__(slider_dset):
return slider_dset.slider.n_total
# return np.prod(slider.basis_shape)
def __getitem__(slider_dset, index):
slider = slider_dset.slider
basis_idx = np.unravel_index(index, slider.basis_shape)
        slices = [bdim[i] for bdim, i in zip(slider.basis_slices, basis_idx)]
        # Index with a tuple of slices; numpy no longer accepts a list of slices.
        chip = slider.source[tuple(slices)]
tensor_chip = torch.FloatTensor(chip)
tensor_basis_idx = torch.LongTensor(np.array(basis_idx))
return tensor_basis_idx, tensor_chip
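# Hedged example (not part of the original module): a minimal sketch of indexing a
# SlidingIndexDataset directly; in practice it is wrapped in a DataLoader as shown in
# the class docstring. The shapes below are illustrative assumptions.
def _demo_sliding_index_dataset():  # pragma: no cover
    volume = np.random.rand(4, 8, 8).astype(np.float32)
    slider = SlidingSlices(volume, target_shape=(2, 4, 4), step=(2, 4, 4))
    dset = slider.to_dataset()
    basis_idx, chip = dset[0]
    # basis_idx locates the chip on the slider grid; chip is a float tensor
    assert tuple(chip.shape) == (2, 4, 4)
    return basis_idx, chip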
class Stitcher(ub.NiceRepr):
"""
Restitches smaller image patches / pixels into a larger output. This is
    used to invert SlidingSlices. For semantic segmentation the patches
are probability chips. Overlapping chips are averaged together.
Args:
shape (tuple): dimensions of the large image that will be created from
the smaller pixels or patches.
Example:
>>> import sys
>>> # Build a high resolution image and slice it into chips
>>> highres = np.random.rand(5, 200, 200).astype(np.float32)
>>> target_shape = (1, 50, 50)
>>> slider = SlidingSlices(highres, target_shape, overlap=(0, .5, .5))
        >>> # Show how Stitcher can be used to reconstruct the original image
>>> stitcher = Stitcher(slider.source.shape)
>>> for sl, chip in list(slider):
... stitcher.add(sl, chip)
>>> assert stitcher.weights.max() == 4, 'some parts should be processed 4 times'
>>> recon = stitcher.finalize()
"""
def __init__(stitcher, shape, xpu='numpy'):
stitcher.shape = shape
stitcher.xpu = xpu
if xpu == 'numpy':
stitcher.sums = np.zeros(shape, dtype=np.float32)
stitcher.weights = np.zeros(shape, dtype=np.float32)
stitcher.sumview = stitcher.sums.ravel()
stitcher.weightview = stitcher.weights.ravel()
else:
stitcher.sums = xpu.move(torch.zeros(shape))
stitcher.weights = xpu.move(torch.zeros(shape))
stitcher.sumview = stitcher.sums.view(-1)
stitcher.weightview = stitcher.weights.view(-1)
stitcher._cumprod = np.cumprod(list(shape[::-1][:-1]))[::-1]
stitcher._cumprod = torch.LongTensor(np.array(stitcher._cumprod))
def __nice__(stitcher):
return str(stitcher.sums.shape)
def add(stitcher, indices, patch, weight=None):
"""
Incorporate a new (possibly overlapping) patch or pixel using a
weighted sum.
Args:
indices (slice or tuple): typically a slice of pixels or a single
pixel, but this can be any numpy fancy index.
patch (ndarray): data to patch into the bigger image.
weight (float or ndarray): weight of this patch (default to 1.0)
"""
if weight is None:
stitcher.sums[indices] += patch
stitcher.weights[indices] += 1.0
else:
stitcher.sums[indices] += (patch * weight)
stitcher.weights[indices] += weight
def add_fast(stitcher, batch_idxs, values, weight=None, assume_order=True):
"""
new faster version
Ignore:
stitcher = velocity_sticher
values = vel_np
import ubelt
for timer in ubelt.Timerit(10, bestof=1):
with timer:
stitcher_add(stitcher, batch_idxs, values, assume_order=False)
import ubelt
for timer in ubelt.Timerit(10, bestof=1):
with timer:
stitcher_add(stitcher, batch_idxs, values, assume_order=True)
import ubelt
batch_idxs_tuple = list(map(tuple, batch_idxs))
for timer in ubelt.Timerit(10, bestof=1):
with timer:
for indices, vel in zip(batch_idxs_tuple, vel_np):
velocity_sticher.add(indices, vel)
Example:
>>> import sys
>>> # Build a high resolution image and slice it into chips
>>> frames = np.random.rand(1, 200, 100, 100).astype(np.float32)
>>> window = (frames.shape[0], 15, 15, 15)
>>> slider = SlidingSlices(frames, window, step=(1, 1, 1, 1))
>>> dset = slider.to_dataset()
>>> n_classes = 2
>>> xpu = nh.XPU(None)
>>> stitcher = Stitcher(slider.basis_shape[1:] + [n_classes], xpu=xpu)
>>> loader = torch.utils.data.DataLoader(dset, batch_size=10)
>>> batch_iter = iter(loader)
>>> batch = next(batch_iter)
>>> batch_idxs_tensors_, chips = batch
>>> invar = torch.autograd.Variable(chips)
>>> conv = torch.nn.Conv3d(frames.shape[0], n_classes, window[1:])
>>> values = conv(invar).data
>>> # remove channel
>>> weight = None
>>> batch_idxs = batch_idxs_tensors_[:, 1:]
>>> stitcher.add_fast(batch_idxs, values, weight, assume_order=True)
Time:
torch.cuda.init()
weight = None
import ubelt as ub
xpu = nh.XPU(0)
values = xpu.move(values)
stitcher = Stitcher(slider.basis_shape[1:] + [n_classes], xpu=xpu)
for timer in ub.Timerit(100, bestof=10, label='gpu'):
with timer:
stitcher.add_fast(batch_idxs, values, weight, assume_order=True)
stitcher = Stitcher(slider.basis_shape[1:] + [n_classes], xpu='numpy')
batch_idxs_np = batch_idxs.numpy()
values_np = values.cpu().numpy()
for timer in ub.Timerit(100, bestof=10, label='numpy'):
with timer:
stitcher.add_fast(batch_idxs_np, values_np, weight, assume_order=True)
Benchmark:
>>> import sys
>>> # setup benchmark
>>> frames = np.random.rand(1, 50, 100, 100).astype(np.float32)
>>> window = (frames.shape[0], 20, 20, 20)
>>> slider = SlidingSlices(frames, window, step=(1, 1, 1, 1))
>>> dset = slider.to_dataset()
>>> loader = torch.utils.data.DataLoader(dset, batch_size=1024)
>>> n_classes = 2
>>> xpu = nh.XPU(1)
>>> conv = torch.nn.Conv3d(window[0], n_classes, window[1:])
>>> conv = xpu.move(conv)
>>> #weight = torch.rand(n_classes, 1, 1, 1)[None, :]
>>> #weight = xpu.move(weight)
>>> #weight_np = weight.cpu().numpy()
>>> weight = weight_np = None
>>> # do dummy computation to warm up gpu
>>> conv(xpu.variable(dset[0][1][None, :]))
>>> torch.set_grad_enabled(False)
>>> conv.train(False)
>>> base_shape = slider.basis_shape[1:]
>>> # ---------------------------------------
>>> # Benchmark on-gpu stitching with pytorch
>>> import tqdm
>>> t1 = ub.Timerit(3, bestof=3, label='gpu')
>>> for timer in tqdm.tqdm(t1, total=3, leave=True):
>>> with timer:
>>> stitcher = Stitcher(base_shape + [n_classes], xpu=xpu)
>>> for batch in loader:
>>> batch_idxs_tensors_, chips = batch
>>> invar = xpu.variable(chips, async=True)
>>> values = conv(invar).data
>>> batch_idxs = batch_idxs_tensors_[:, 1:].numpy()
>>> stitcher.add_fast(batch_idxs, values, weight,
>>> assume_order=True)
>>> # ---------------------------------------
>>> # Benchmark on-cpu stitching with numpy
>>> t2 = ub.Timerit(3, bestof=3, label='numpy')
>>> for timer in tqdm.tqdm(t2, total=3, leave=True):
>>> with timer:
>>> stitcher = Stitcher(base_shape + [n_classes], xpu='numpy')
>>> for batch in iter(loader):
>>> batch_idxs_tensors_, chips = batch
>>> invar = xpu.variable(chips, async=True)
>>> values_np = conv(invar).data.cpu().numpy()
>>> batch_idxs_np = batch_idxs_tensors_[:, 1:].numpy()
>>> stitcher.add_fast(batch_idxs_np, values_np,
>>> weight_np, assume_order=True)
>>> # VERDICT:
>>> # Async GPU stitching gives a minor but insignificant speedup
>>> # GPU: time per loop: best=4.394 s, mean=4.394 ± 0.0 s
>>> # NUMPY: time per loop: best=4.876 s, mean=4.876 ± 0.0 s
"""
if stitcher.xpu != 'numpy':
# ON GPU STITCHING
n_classes = stitcher.shape[-1]
end = batch_idxs.shape[0] - 1
t_base_multi_idxs = batch_idxs[[0, end]]
            # we don't need a trailing 1 because we aren't padding extra zeros
            cumprod = stitcher._cumprod[None, :]
ravel_idxs_range = (t_base_multi_idxs * cumprod).sum(dim=1)
first = ravel_idxs_range[0]
last = ravel_idxs_range[-1] + n_classes
ravel_sl = slice(first, last)
ravel_index = ravel_sl
if weight is None:
stitcher.sumview[ravel_index] += values.view(-1)
stitcher.weightview[ravel_index] += 1.0
else:
stitcher.sumview[ravel_index] += (values * weight).view(-1)
stitcher.weightview[ravel_index] += weight.view(-1)
else:
# TODO: maybe check if the input is a tensor?
shape = stitcher.shape
n_classes = shape[-1]
# if we assume we get data in order, its even faster
if assume_order:
last = batch_idxs.shape[0] - 1
base_multi_idxs = tuple(batch_idxs[[0, last]].T)
# Add extra dimension for output classes
                extra_multi_idxs = np.zeros(2, dtype=int)
multi_idxs_range = base_multi_idxs + (extra_multi_idxs,)
ravel_idxs_range = np.ravel_multi_index(multi_idxs_range, dims=shape)
first = ravel_idxs_range[0]
last = ravel_idxs_range[-1] + n_classes
ravel_sl = slice(first, last)
ravel_index = ravel_sl
else:
base_multi_idxs = tuple(batch_idxs.T)
                extra_multi_idxs = np.zeros(len(batch_idxs), dtype=int)
# The indices for the 0-th class (which should be the last dimension)
multi_idxs_first = base_multi_idxs + (extra_multi_idxs,)
ravel_idxs_first = np.ravel_multi_index(multi_idxs_first, dims=shape)
# The indices for the next classes should be sequentially after
all_ravel_idxs = [ravel_idxs_first[None, :]]
for i in range(1, n_classes):
all_ravel_idxs.append((ravel_idxs_first + i)[None, :])
# raveled indices that correspond with raveled data
ravel_idxs = np.vstack(all_ravel_idxs).T.ravel()
# assert np.sum(1 - np.diff(ravel_idxs)), 'we cant assume order'
ravel_index = ravel_idxs
if weight is None:
stitcher.sumview[ravel_index] += values.ravel()
stitcher.weightview[ravel_index] += 1.0
else:
stitcher.sumview[ravel_index] += (values * weight).ravel()
stitcher.weightview[ravel_index] += np.ravel(weight)
def average(stitcher):
"""
Averages out contributions from overlapping adds using weighted average
Returns:
out: ndarray: the stitched image
"""
out = stitcher.sums / stitcher.weights
return out
def finalize(stitcher, frame_ids=None):
"""
Averages out contributions from overlapping adds
Args:
frame_ids(None or slice or tuple): if subset is not None, this is
done for only a region of the larger tensor, otherwise it is
done for the entire tensor.
TODO: rename frame_ids subset
Returns:
final: ndarray: the stitched image
"""
if frame_ids is None:
final = stitcher.sums / stitcher.weights
else:
final = stitcher.sums[frame_ids] / stitcher.weights[frame_ids]
if stitcher.xpu != 'numpy':
final = final.cpu().numpy()
final = np.nan_to_num(final)
return final
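# Hedged example (not part of the original module): a minimal round trip that slices an
# image, leaves each chip unchanged, and stitches it back together; since overlapping
# contributions are identical, the weighted average reproduces the input. Shapes and
# overlap fractions are illustrative assumptions.
def _demo_stitcher_roundtrip():  # pragma: no cover
    image = np.random.rand(2, 8, 8).astype(np.float32)
    slider = SlidingSlices(image, target_shape=(1, 4, 4), overlap=(0, .5, .5))
    stitcher = Stitcher(slider.source.shape)
    for sl, chip in slider:
        stitcher.add(sl, chip)
    recon = stitcher.finalize()
    assert np.allclose(recon, image)
    return recon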
if __name__ == '__main__':
"""
CommandLine:
python -m netharn.util.util_slider all
"""
import xdoctest
xdoctest.doctest_module(__file__)
| 40.636257
| 92
| 0.557391
|
f219e590daf56ae1b1dd62ba2b91aa2f47e9d461
| 51,255
|
py
|
Python
|
tensorflow/python/keras/engine/functional.py
|
Mithilesh1609/tensorflow
|
63f70b5611d7f50512ea26295d26016c2704901b
|
[
"Apache-2.0"
] | 2
|
2020-06-16T22:01:14.000Z
|
2020-06-20T10:10:26.000Z
|
tensorflow/python/keras/engine/functional.py
|
3ecurityy/tensorflow
|
f8c0e68a8aa5d575a19129ec67c9ed6262652082
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/keras/engine/functional.py
|
3ecurityy/tensorflow
|
f8c0e68a8aa5d575a19129ec67c9ed6262652082
|
[
"Apache-2.0"
] | 1
|
2020-05-19T23:48:24.000Z
|
2020-05-19T23:48:24.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""A `Network` is way to compose layers: the topological form of a `Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_layer as input_layer_module
from tensorflow.python.keras.engine import training as training_lib
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.saving.saved_model import network_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
# pylint: disable=g-classes-have-attributes
class Functional(training_lib.Model):
"""A `Functional` model is a `Model` defined as a directed graph of layers.
Three types of `Model` exist: subclassed `Model`, `Functional` model,
and `Sequential` (a special case of `Functional`).
In general, more Keras features are supported with `Functional`
than with subclassed `Model`s, specifically:
- Model cloning (`keras.models.clone`)
  - Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`)
- Whole-model saving (`model.save()`)
A `Functional` model can be instantiated by passing two arguments to
`__init__`. The first argument is the `keras.Input` Tensors that represent
the inputs to the model. The second argument specifies the output
tensors that represent the outputs of this model. Both arguments can be a
nested structure of tensors.
Example:
```
inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
  outputs = keras.layers.Add()([t, inputs['x2']])
model = keras.Model(inputs, outputs)
```
A `Functional` model constructed using the Functional API can also include raw
TensorFlow functions, with the exception of functions that create Variables
or assign ops.
Example:
```
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = tf.nn.relu(x)
model = keras.Model(inputs, outputs)
```
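  Because a `Functional` model is a static graph of layers, its config fully
  describes it. A hedged sketch of the serialization round trip mentioned above
  (continuing the previous example):
  ```
  config = model.get_config()
  new_model = keras.Model.from_config(config)
  ```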
Arguments:
inputs: List of input tensors (must be created via `tf.keras.Input()`).
outputs: List of outputs tensors.
name: String, optional. Name of the model.
trainable: Boolean, whether the model's variables should be trainable.
"""
# See tf.Module for the usage of this property.
# The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to
# flatten the key since it is trying to convert Trackable/Layer to a string.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_layer_call_argspecs', '_compiled_trainable_state',
'_output_mask_cache', '_output_tensor_cache', '_output_shape_cache'),
training_lib.Model._TF_MODULE_IGNORED_PROPERTIES
))
@trackable.no_automatic_dependency_tracking
def __init__(self, inputs=None, outputs=None, name=None, trainable=True):
# generic_utils.validate_kwargs(
# kwargs, {'name', 'trainable'},
# 'Functional models may only specify `name` and `trainable` keyword '
# 'arguments during initialization. Got an unexpected argument:')
super(Functional, self).__init__(name=name, trainable=trainable)
self._init_graph_network(inputs, outputs)
@trackable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs):
# This method is needed for Sequential to reinitialize graph network when
# layer is added or removed.
self._is_graph_network = True
# Normalize and set self.inputs, self.outputs.
if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:
inputs = inputs[0]
if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:
outputs = outputs[0]
self._nested_inputs = inputs
self._nested_outputs = outputs
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
# Models constructed with a single Tensor or list of Tensors can
# be called with a dict, where the keys of the dict are the names
# of the `Input` objects. Extra keys are ignored.
self._enable_dict_to_input_mapping = (
not nest.is_sequence(self._nested_inputs) or
(isinstance(self._nested_inputs, (list, tuple)) and
not any(nest.is_sequence(t) for t in self._nested_inputs)))
if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
base_layer_utils.create_keras_history(self._nested_outputs)
self._validate_graph_inputs_and_outputs()
# A Network does not create weights of its own, thus it is already
# built.
self.built = True
self._build_input_shape = nest.map_structure(lambda x: x.shape, inputs)
self._compute_output_and_mask_jointly = True
# `_expects_training_arg` is True since the `training` argument is always
# present in the signature of the `call` method of a graph network.
self._expects_training_arg = True
self._expects_mask_arg = True
# A graph network does not autocast inputs, as its layers will cast them
# instead.
self._autocast = False
self._input_layers = []
self._output_layers = []
self._input_coordinates = []
self._output_coordinates = []
# This is for performance optimization when calling the Network on new
    # inputs. Every time the Network is called on a set of input tensors,
# we compute the output tensors, output masks and output shapes in one pass,
# then cache them here. When any of these outputs is queried later, we
# retrieve it from there instead of recomputing it.
self._output_mask_cache = {}
self._output_tensor_cache = {}
self._output_shape_cache = {}
# Build self._output_layers:
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
self._output_layers.append(layer)
self._output_coordinates.append((layer, node_index, tensor_index))
# Build self._input_layers:
for x in self.inputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
# It's supposed to be an input layer, so only one node
# and one tensor output.
assert node_index == 0
assert tensor_index == 0
self._input_layers.append(layer)
self._input_coordinates.append((layer, node_index, tensor_index))
# Keep track of the network's nodes and layers.
nodes, nodes_by_depth, layers, _ = _map_graph_network(
self.inputs, self.outputs)
self._network_nodes = nodes
self._nodes_by_depth = nodes_by_depth
self._layers = layers
self._layer_call_argspecs = {}
for layer in self._layers:
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
layer._attribute_sentinel.add_parent(self._attribute_sentinel)
# Build self.input_names and self.output_names.
self._set_output_names()
self.input_names = []
self._feed_input_names = []
self._feed_inputs = []
self._feed_input_shapes = []
for layer in self._input_layers:
self.input_names.append(layer.name)
if layer.is_placeholder:
self._feed_input_names.append(layer.name)
# Use batch_input_shape here because non-eager composite tensors may not
# have a shape attribute that's meaningful (sparse, for instance, has
# a tensor that's non-constant and needs to be fed). This means that
# input layers that create placeholders will need to have the
# batch_input_shape attr to allow for input shape validation.
self._feed_input_shapes.append(layer._batch_input_shape)
self._feed_inputs.append(layer.input)
self._compute_tensor_usage_count()
self._set_save_spec(self._nested_inputs)
tf_utils.assert_no_legacy_layers(self.layers)
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
return self._nested_inputs
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
return nest.map_structure(backend.int_shape, self.input)
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
return self._nested_outputs
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
return nest.map_structure(backend.int_shape, self.output)
def _set_output_names(self):
"""Assigns unique names to the Network's outputs.
Output layers with multiple output tensors would otherwise lead to duplicate
names in self.output_names.
"""
uniquified = []
output_names = set()
prefix_count = {}
for layer in self._output_layers:
proposal = layer.name
while proposal in output_names:
existing_count = prefix_count.get(layer.name, 1)
proposal = '{}_{}'.format(layer.name, existing_count)
prefix_count[layer.name] = existing_count + 1
output_names.add(proposal)
uniquified.append(proposal)
self.output_names = uniquified
@property
def _layer_checkpoint_dependencies(self):
"""Dictionary of layer dependencies to be included in the checkpoint."""
weight_layer_index = 0
dependencies = collections.OrderedDict()
for layer_index, layer in enumerate(self.layers):
try:
if layer.weights:
# Keep a separate index for layers which have weights. This allows
# users to insert Layers without weights anywhere in the network
# without breaking checkpoints.
dependencies['layer_with_weights-%d' % weight_layer_index] = layer
weight_layer_index += 1
except ValueError:
        # The layer might have weights, but may not be built yet. We just treat
        # it as a layer without weights.
pass
# Even if it doesn't have weights, we should still track everything in
# case it has/will have Trackable dependencies.
dependencies['layer-%d' % layer_index] = layer
return dependencies
@property
def _checkpoint_dependencies(self):
dependencies = [
trackable.TrackableReference(name=name, ref=layer)
for name, layer in self._layer_checkpoint_dependencies.items()]
dependencies.extend(super(Functional, self)._checkpoint_dependencies)
return dependencies
def _lookup_dependency(self, name):
layer_dependencies = self._layer_checkpoint_dependencies
if name in layer_dependencies:
return layer_dependencies[name]
return super(Functional, self)._lookup_dependency(name)
def _handle_deferred_layer_dependencies(self, layers):
"""Handles layer checkpoint dependencies that are added after init."""
layer_checkpoint_dependencies = self._layer_checkpoint_dependencies
layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()}
for layer in layers:
if layer in layer_to_name:
self._handle_deferred_dependencies(name=layer_to_name[layer],
trackable=layer)
@property
def _should_compute_mask(self):
return True
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
output_tensors = self._run_internal_graph(inputs, mask=mask)
return nest.map_structure(lambda t: t._keras_mask, output_tensors)
def call(self, inputs, training=None, mask=None):
"""Calls the model on new inputs.
In this case `call` just reapplies
all ops in the graph to the new inputs
(e.g. build a new computational graph from the provided inputs).
Arguments:
inputs: A tensor or list of tensors.
training: Boolean or boolean scalar tensor, indicating whether to run
the `Network` in training mode or inference mode.
mask: A mask or list of masks. A mask can be
either a tensor or None (no mask).
Returns:
A tensor if there is a single output, or
a list of tensors if there are more than one outputs.
"""
return self._run_internal_graph(
inputs, training=training, mask=mask)
def compute_output_shape(self, input_shape):
# Convert any shapes in tuple format to TensorShapes.
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):
raise ValueError('Invalid input_shape argument ' + str(input_shape) +
': model has ' + str(len(self._input_layers)) +
' tensor inputs.')
# Use the tuple of TensorShape as the cache key, since tuple is hashable
# and can be used as hash key.
try:
cache_key = tuple(tf_utils.convert_shapes(input_shape, to_tuples=True))
if cache_key in self._output_shape_cache:
# Cache hit. Return shapes as TensorShapes.
return self._output_shape_cache[cache_key]
except ValueError:
# In case there are unknown TensorShape, eg for sparse tensor input,
# We skip the caching since the shape is unknown.
pass
layers_to_output_shapes = {}
for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):
# It's an input layer: then `compute_output_shape` is identity,
      # and there is only one node and one tensor output.
shape_key = layer.name + '_0_0'
layers_to_output_shapes[shape_key] = shape
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Iterate over nodes, by depth level.
if len(depth_keys) > 1:
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
layer = node.layer
if layer in self._input_layers:
# We've already covered the input layers
# a few lines above.
continue
# Get the input shapes for the first argument of the node
layer_input_shapes = []
layer_inputs = node.call_args[0]
for layer_input in nest.flatten(layer_inputs):
kh = layer_input._keras_history
input_layer_key = kh.layer.name + '_%s_%s' % (kh.node_index,
kh.tensor_index)
layer_input_shapes.append(layers_to_output_shapes[input_layer_key])
layer_input_shapes = nest.pack_sequence_as(layer_inputs,
layer_input_shapes)
# Layers expect shapes to be tuples for `compute_output_shape`.
layer_input_shapes = tf_utils.convert_shapes(
layer_input_shapes, to_tuples=True)
layer_output_shapes = layer.compute_output_shape(layer_input_shapes)
# Convert back to TensorShapes.
layer_output_shapes = tf_utils.convert_shapes(
layer_output_shapes, to_tuples=False)
node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access
for j, shape in enumerate(nest.flatten(layer_output_shapes)):
shape_key = layer.name + '_%s_%s' % (node_index, j)
layers_to_output_shapes[shape_key] = shape
# Read final output shapes from layers_to_output_shapes.
output_shapes = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
output_shapes.append(layers_to_output_shapes[shape_key])
output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)
# Store in cache.
self._output_shape_cache[cache_key] = output_shapes
# Return shapes as TensorShapes.
return output_shapes
def _run_internal_graph(self, inputs, training=None, mask=None):
"""Computes output tensors for new inputs.
# Note:
- Can be run on non-Keras tensors.
Arguments:
inputs: Tensor or nested structure of Tensors.
training: Boolean learning phase.
mask: (Optional) Tensor or nested structure of Tensors.
Returns:
output_tensors
"""
inputs = self._flatten_to_reference_inputs(inputs)
if mask is None:
masks = [None] * len(inputs)
else:
masks = self._flatten_to_reference_inputs(mask)
for input_t, mask in zip(inputs, masks):
input_t._keras_mask = mask
# Dictionary mapping reference tensors to computed tensors.
tensor_dict = {}
tensor_usage_count = self._tensor_usage_count
for x, y in zip(self.inputs, inputs):
y = self._conform_to_reference_input(y, ref_input=x)
x_id = str(id(x))
tensor_dict[x_id] = [y] * tensor_usage_count[x_id]
nodes_by_depth = self._nodes_by_depth
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = nodes_by_depth[depth]
for node in nodes:
if node.is_input:
continue # Input tensors already exist.
if any(t_id not in tensor_dict for t_id in node.flat_input_ids):
continue # Node is not computable, try skipping.
args, kwargs = node.map_arguments(tensor_dict)
outputs = node.layer(*args, **kwargs)
# Update tensor_dict.
for x_id, y in zip(node.flat_output_ids, nest.flatten(outputs)):
tensor_dict[x_id] = [y] * tensor_usage_count[x_id]
output_tensors = []
for x in self.outputs:
x_id = str(id(x))
assert x_id in tensor_dict, 'Could not compute output ' + str(x)
output_tensors.append(tensor_dict[x_id].pop())
return nest.pack_sequence_as(self._nested_outputs, output_tensors)
def _flatten_to_reference_inputs(self, tensors):
"""Maps `tensors` to their respective `keras.Input`."""
if self._enable_dict_to_input_mapping and isinstance(tensors, dict):
ref_inputs = self._nested_inputs
if not nest.is_sequence(ref_inputs):
ref_inputs = [self._nested_inputs]
try:
# Flatten in the order `Input`s were passed during Model construction.
return [tensors[inp._keras_history.layer.name] for inp in ref_inputs]
except KeyError:
# TODO(b/151582614)
return nest.flatten(tensors)
# Otherwise both self.inputs and tensors will already be in same order.
return nest.flatten(tensors)
def _conform_to_reference_input(self, tensor, ref_input):
"""Set shape and dtype based on `keras.Input`s."""
# Shape handling (only for non-CompositeTensors).
if isinstance(tensor, ops.Tensor) and isinstance(ref_input, ops.Tensor):
# Allow (None,) and (None, 1) Tensors to be passed interchangably. Use the
# shape specified by the `keras.Input`.
if tensor.shape.rank is not None and ref_input.shape.rank is not None:
should_squeeze_last_dim = (
tensor.shape.rank == ref_input.shape.rank + 1 and
tensor.shape[-1] == 1)
should_expand_last_dim = (
tensor.shape.rank == ref_input.shape.rank - 1 and
ref_input.shape[-1] == 1)
if should_squeeze_last_dim:
tensor = array_ops.squeeze_v2(tensor, axis=-1)
elif should_expand_last_dim:
tensor = array_ops.expand_dims_v2(tensor, axis=-1)
# Add shape hints to Tensors that might have None shape dims but have
# shapes defined by the `keras.Input`.
try:
tensor.set_shape(tensor.shape.merge_with(ref_input.shape))
except ValueError:
logging.warning(
'Model was constructed with shape {} for input {}, but it was '
'called on an input with incompatible shape {}.'.format(
ref_input.shape, ref_input, tensor.shape))
# Dtype handling.
if isinstance(ref_input, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor = math_ops.cast(tensor, dtype=ref_input.dtype)
return tensor
def get_config(self):
return copy.deepcopy(get_network_config(self))
@classmethod
def from_config(cls, config, custom_objects=None):
"""Instantiates a Model from its config (output of `get_config()`).
Arguments:
config: Model config dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A model instance.
Raises:
ValueError: In case of improperly formatted config dict.
"""
input_tensors, output_tensors, created_layers = reconstruct_from_config(
config, custom_objects)
model = cls(inputs=input_tensors, outputs=output_tensors,
name=config.get('name'))
connect_ancillary_layers(model, created_layers)
return model
def _validate_graph_inputs_and_outputs(self):
"""Validates the inputs and outputs of a Graph Network."""
# Check for redundancy in inputs.
if len({id(i) for i in self.inputs}) != len(self.inputs):
raise ValueError('The list of inputs passed to the model '
'is redundant. '
'All inputs should only appear once.'
' Found: ' + str(self.inputs))
for x in self.inputs:
# Check that x has appropriate `_keras_history` metadata.
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Input tensors to a ' + cls_name + ' ' +
'must come from `tf.keras.Input`. '
'Received: ' + str(x) +
' (missing previous layer metadata).')
# Check that x is an input tensor.
# pylint: disable=protected-access
layer = x._keras_history.layer
if len(layer._inbound_nodes) > 1 or (
layer._inbound_nodes and not layer._inbound_nodes[0].is_input):
cls_name = self.__class__.__name__
logging.warning(cls_name + ' inputs must come from '
'`tf.keras.Input` (thus holding past layer metadata), '
'they cannot be the output of '
'a previous non-Input layer. '
'Here, a tensor specified as '
'input to "' + self.name + '" was not an Input tensor, '
'it was generated by layer ' + layer.name + '.\n'
'Note that input tensors are '
'instantiated via `tensor = tf.keras.Input(shape)`.\n'
'The tensor that caused the issue was: ' + str(x.name))
# Check compatibility of batch sizes of Input Layers.
input_batch_sizes = [
training_utils.get_static_batch_size(x._keras_history.layer)
for x in self.inputs
]
consistent_batch_size = None
for batch_size in input_batch_sizes:
if batch_size is not None:
if (consistent_batch_size is not None and
batch_size != consistent_batch_size):
raise ValueError('The specified batch sizes of the Input Layers'
' are incompatible. Found batch sizes: {}'.format(
input_batch_sizes))
consistent_batch_size = batch_size
for x in self.outputs:
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Output tensors to a ' + cls_name + ' must be '
'the output of a TensorFlow `Layer` '
'(thus holding past layer metadata). Found: ' + str(x))
def _insert_layers(self, layers, relevant_nodes=None):
"""Inserts Layers into the Network after Network creation.
This is only valid for Keras Graph Networks. Layers added via this function
will be included in the `call` computation and `get_config` of this Network.
They will not be added to the Network's outputs.
Arguments:
layers: Arbitrary nested structure of Layers. Layers must be reachable
from one or more of the `keras.Input` Tensors that correspond to this
Network's inputs.
relevant_nodes: Nodes from the Layers that should be considered part of
this Network. If `None`, all Nodes will be considered part of this
Network.
Raises:
ValueError: If the layers depend on `Input`s not found in this Model.
"""
layers = nest.flatten(layers)
tf_utils.assert_no_legacy_layers(layers)
node_to_depth = {}
for depth, nodes in self._nodes_by_depth.items():
node_to_depth.update({node: depth for node in nodes})
# The nodes of these Layers that are relevant to this Network. If not
# provided, assume all Nodes are relevant
if not relevant_nodes:
relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])
network_nodes = set(relevant_nodes + list(node_to_depth.keys()))
def _get_min_depth(node):
"""Gets the minimum depth at which node can be computed."""
min_depth = 0
for layer, node_id, _, _ in node.iterate_inbound():
inbound_node = layer._inbound_nodes[node_id]
if inbound_node in node_to_depth:
min_depth = min(min_depth, node_to_depth[inbound_node])
elif inbound_node not in network_nodes:
continue
else:
# Previous relevant nodes haven't been processed yet.
return None
# New node is one shallower than its shallowest input.
return min_depth - 1
# Insert nodes into `_nodes_by_depth` and other node attrs.
unprocessed_nodes = copy.copy(relevant_nodes)
i = 0
while unprocessed_nodes:
i += 1
# Do a sanity check. This can occur if `Input`s from outside this Model
# are being relied on.
if i > 10000:
raise ValueError('Layers could not be added due to missing '
'dependencies.')
node = unprocessed_nodes.pop(0)
depth = _get_min_depth(node)
if depth is None: # Defer until inbound nodes are processed.
unprocessed_nodes.append(node)
continue
node_key = _make_node_key(node.layer.name,
node.layer._inbound_nodes.index(node))
if node_key not in self._network_nodes:
node_to_depth[node] = depth
self._network_nodes.add(node_key)
self._nodes_by_depth[depth].append(node)
# Insert layers and update other layer attrs.
layer_set = set(self._layers)
deferred_layers = []
for layer in layers:
if layer not in layer_set:
self._layers.append(layer)
deferred_layers.append(layer)
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
# This allows the added layer to broadcast mutations to the current
# layer, which is necessary to ensure cache correctness.
layer._attribute_sentinel.add_parent(self._attribute_sentinel)
layer_set.add(layer)
self._handle_deferred_layer_dependencies(deferred_layers)
self._compute_tensor_usage_count()
def _compute_tensor_usage_count(self):
"""Compute the #. of tensor usages for all the output tensors of layers.
The computed tensor usage count is saved as `self._tensor_usage_count`. This
is later used for saving memory in eager computation by releasing
no-longer-needed tensors as early as possible.
"""
tensor_usage_count = collections.Counter()
available_tensors = set(str(id(tensor)) for tensor in self.inputs)
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
depth_keys = depth_keys[1:]
for depth in depth_keys:
for node in self._nodes_by_depth[depth]:
input_tensors = {
str(id(tensor)) for tensor in nest.flatten(node.keras_inputs)
}
if input_tensors.issubset(available_tensors):
for tensor in nest.flatten(node.keras_inputs):
tensor_usage_count[str(id(tensor))] += 1
for output_tensor in nest.flatten(node.outputs):
available_tensors.add(str(id(output_tensor)))
for tensor in self.outputs:
tensor_usage_count[str(id(tensor))] += 1
self._tensor_usage_count = tensor_usage_count
def _assert_weights_created(self):
# Override the implementation in Model.
# The Functional model should always have weight created already.
return
def _graph_network_add_loss(self, symbolic_loss):
new_nodes, new_layers = _map_subgraph_network(self.inputs, [symbolic_loss])
# Losses must be keyed on inputs no matter what in order to be supported in
# DistributionStrategy.
add_loss_layer = base_layer.AddLoss(
unconditional=False, dtype=symbolic_loss.dtype)
add_loss_layer(symbolic_loss)
new_nodes.extend(add_loss_layer.inbound_nodes)
new_layers.append(add_loss_layer)
self._insert_layers(new_layers, new_nodes)
def _graph_network_add_metric(self, value, aggregation, name):
new_nodes, new_layers = _map_subgraph_network(self.inputs, [value])
add_metric_layer = base_layer.AddMetric(
aggregation, name, dtype=value.dtype)
add_metric_layer(value)
new_nodes.extend(add_metric_layer.inbound_nodes)
new_layers.append(add_metric_layer)
self._insert_layers(new_layers, new_nodes)
@property
def _trackable_saved_model_saver(self):
return network_serialization.NetworkSavedModelSaver(self)
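# Hedged illustration (not part of the original module): `Functional` is normally
# constructed through the public `tf.keras` API rather than instantiated directly, as
# sketched below. The local import is an assumption about the runtime environment and
# the helper is never called from this module.
def _demo_functional_usage():  # pragma: no cover
  import tensorflow as tf
  inputs = tf.keras.Input(shape=(4,))
  outputs = tf.keras.layers.Dense(2, activation='relu')(inputs)
  model = tf.keras.Model(inputs, outputs)  # builds a Functional model
  return model.get_config()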
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
"""Validates a network's topology and gather its layers and nodes.
Arguments:
inputs: List of input tensors.
outputs: List of outputs tensors.
Returns:
A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
- nodes: list of Node instances.
- nodes_by_depth: dict mapping ints (depth) to lists of node instances.
- layers: list of Layer instances.
- layers_by_depth: dict mapping ints (depth) to lists of layer instances.
Raises:
ValueError: In case the network is not valid (e.g. disconnected graph).
"""
# "depth" is number of layers between output Node and the Node.
# Nodes are ordered from inputs -> outputs.
nodes_in_decreasing_depth, layer_indices = _build_map(outputs)
network_nodes = {
_make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))
for node in nodes_in_decreasing_depth
}
nodes_depths = {} # dict {node: depth value}
layers_depths = {} # dict {layer: depth value}
for node in reversed(nodes_in_decreasing_depth):
# If the depth is not set, the node has no outbound nodes (depth 0).
depth = nodes_depths.setdefault(node, 0)
# Update the depth of the corresponding layer
previous_depth = layers_depths.get(node.layer, 0)
# If we've seen this layer before at a higher depth,
# we should use that depth instead of the node depth.
# This is necessary for shared layers that have inputs at different
# depth levels in the graph.
depth = max(depth, previous_depth)
layers_depths[node.layer] = depth
nodes_depths[node] = depth
# Update the depth of inbound nodes.
# The "depth" of a node is the max of the depths
# of all nodes it is connected to + 1.
for node_dep in node.parent_nodes:
previous_depth = nodes_depths.get(node_dep, 0)
nodes_depths[node_dep] = max(depth + 1, previous_depth)
# Handle inputs that are not connected to outputs.
# We do not error out here because the inputs may be used to compute losses
# and metrics.
for input_t in inputs:
input_layer = input_t._keras_history[0]
if input_layer not in layers_depths:
layers_depths[input_layer] = 0
layer_indices[input_layer] = -1
nodes_depths[input_layer._inbound_nodes[0]] = 0
network_nodes.add(_make_node_key(input_layer.name, 0))
# Build a dict {depth: list of nodes with this depth}
nodes_by_depth = collections.defaultdict(list)
for node, depth in nodes_depths.items():
nodes_by_depth[depth].append(node)
# Build a dict {depth: list of layers with this depth}
layers_by_depth = collections.defaultdict(list)
for layer, depth in layers_depths.items():
layers_by_depth[depth].append(layer)
# Get sorted list of layer depths.
depth_keys = list(layers_by_depth.keys())
depth_keys.sort(reverse=True)
# Set self.layers ordered by depth.
layers = []
for depth in depth_keys:
layers_for_depth = layers_by_depth[depth]
# Network.layers needs to have a deterministic order:
# here we order them by traversal order.
layers_for_depth.sort(key=lambda x: layer_indices[x])
layers.extend(layers_for_depth)
# Get sorted list of node depths.
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Check that all tensors required are computable.
# computable_tensors: all tensors in the graph
# that can be computed from the inputs provided.
computable_tensors = set()
for x in inputs:
computable_tensors.add(id(x))
layers_with_complete_input = [] # To provide a better error msg.
for depth in depth_keys:
for node in nodes_by_depth[depth]:
layer = node.layer
if layer and not node.is_input:
for x in nest.flatten(node.keras_inputs):
if id(x) not in computable_tensors:
raise ValueError('Graph disconnected: '
'cannot obtain value for tensor ' + str(x) +
' at layer "' + layer.name + '". '
'The following previous layers '
'were accessed without issue: ' +
str(layers_with_complete_input))
for x in nest.flatten(node.outputs):
computable_tensors.add(id(x))
layers_with_complete_input.append(layer.name)
# Ensure name unicity, which will be crucial for serialization
# (since serialized nodes refer to layers by their name).
all_names = [layer.name for layer in layers]
for name in all_names:
if all_names.count(name) != 1:
raise ValueError('The name "' + name + '" is used ' +
str(all_names.count(name)) + ' times in the model. '
'All layer names should be unique.')
return network_nodes, nodes_by_depth, layers, layers_by_depth
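# Hedged illustration (not part of the original module): the depth rule used in
# the loop above, applied to a toy graph of plain dicts so it can be read
# without Keras objects. `_toy_depth_example` and its graph are invented for
# this sketch and are never called by the library code.
def _toy_depth_example():
  """Returns {node: depth} for a small hand-built graph (illustration only)."""
  # a -> b -> d and a -> c -> d, where 'd' is the only output node.
  parents = {'d': ['b', 'c'], 'b': ['a'], 'c': ['a'], 'a': []}
  depths = {}
  # Walk from the output back towards the inputs, like the reversed() loop above.
  for node in ['d', 'b', 'c', 'a']:
    depth = depths.setdefault(node, 0)
    # depth(parent) = max(depth(node) + 1, previously recorded depth).
    for parent in parents[node]:
      depths[parent] = max(depth + 1, depths.get(parent, 0))
  return depths  # {'d': 0, 'b': 1, 'c': 1, 'a': 2}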
def _build_map(outputs):
"""This method topologically sorts nodes in order from inputs to outputs.
It uses a depth-first search to topologically sort nodes that appear in the
_keras_history connectivity metadata of `outputs`.
Args:
outputs: the output tensors whose _keras_history metadata should be walked.
This may be an arbitrary nested structure.
Returns:
A tuple like (ordered_nodes, layer_to_first_traversal_index)
ordered_nodes: list of nodes appearing in the keras history, topologically
sorted from original inputs to the `outputs`.
(If outputs have different sets of ancestors, the inputs to one output
may appear after a different output).
layer_to_first_traversal_index:
A dict mapping layer to the traversal index in the DFS where it is
seen. Note: if a layer is shared by several nodes, the dict will only
      store the index corresponding to the *first* time the layer is seen.
"""
finished_nodes = set()
nodes_in_progress = set()
nodes_in_decreasing_depth = [] # nodes from inputs -> outputs.
  layer_indices = {}  # layer -> index in traversal order.
for output in nest.flatten(outputs):
_build_map_helper(output, finished_nodes, nodes_in_progress,
nodes_in_decreasing_depth, layer_indices)
return nodes_in_decreasing_depth, layer_indices
def _build_map_helper(tensor, finished_nodes, nodes_in_progress,
nodes_in_decreasing_depth, layer_indices):
"""Recursive helper for `_build_map`."""
layer, node_index, _ = tensor._keras_history # pylint: disable=protected-access
node = layer._inbound_nodes[node_index] # pylint: disable=protected-access
# Don't repeat work for shared subgraphs
if node in finished_nodes:
return
# Prevent cycles.
if node in nodes_in_progress:
raise ValueError('The tensor ' + str(tensor) + ' at layer "' + layer.name +
'" is part of a cycle.')
# Store the traversal order for layer sorting.
if layer not in layer_indices:
layer_indices[layer] = len(layer_indices)
# Propagate to all previous tensors connected to this node.
nodes_in_progress.add(node)
if not node.is_input:
for tensor in node.keras_inputs:
_build_map_helper(tensor, finished_nodes, nodes_in_progress,
nodes_in_decreasing_depth, layer_indices)
finished_nodes.add(node)
nodes_in_progress.remove(node)
nodes_in_decreasing_depth.append(node)
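# Hedged sketch (not part of the original module): the same post-order DFS as
# `_build_map_helper`, written over a plain adjacency dict. A node is appended
# only after all of its inputs are finished, which yields the inputs -> outputs
# ordering consumed by `_map_graph_network`. The names below are invented for
# this illustration and are never called by the library code.
def _toy_build_map_example():
  """Returns a topological ordering of a small graph (illustration only)."""
  inputs_of = {'d': ['b', 'c'], 'b': ['a'], 'c': ['a'], 'a': []}
  finished, in_progress, ordering = set(), set(), []

  def visit(node):
    if node in finished:
      return
    if node in in_progress:
      raise ValueError('The graph contains a cycle at ' + node)
    in_progress.add(node)
    for parent in inputs_of[node]:
      visit(parent)
    finished.add(node)
    in_progress.remove(node)
    ordering.append(node)

  visit('d')  # Start from the output, as `_build_map` does for each output.
  return ordering  # ['a', 'b', 'c', 'd']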
def _map_subgraph_network(inputs, outputs):
"""Returns the nodes and layers in the topology from `inputs` to `outputs`.
Args:
inputs: List of input tensors.
outputs: List of output tensors.
Returns:
    A tuple of List[Node] and List[Layer].
"""
base_layer_utils.create_keras_history(outputs)
# Keep only nodes and layers in the topology between inputs and outputs.
_, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)
return nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers
def _should_skip_first_node(layer):
"""Returns True if the first layer node should not be saved or loaded."""
# Networks start with a pre-existing node linking their input to output.
  # A Sequential model is first created with _is_graph_network = False, so we
  # have to keep the _is_graph_network check here.
return isinstance(layer, Functional) and layer._is_graph_network
def _deserialize_keras_tensors(kwargs, layer_map):
"""Deserializes Keras Tensors passed to `call`.."""
def _deserialize_keras_tensor(t):
"""Deserializes a single Keras Tensor passed to `call`."""
if isinstance(t, tf_utils.ListWrapper):
t = t.as_list()
layer_name = t[0]
node_index = t[1]
tensor_index = t[2]
layer = layer_map[layer_name]
node = layer._inbound_nodes[node_index]
return nest.flatten(node.outputs)[tensor_index]
return t
kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)
return nest.map_structure(_deserialize_keras_tensor, kwargs)
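# Hedged illustration (not part of the original module): the serialized form
# handled above. A Keras tensor referenced inside `call` kwargs is stored as a
# triple [layer_name, node_index, tensor_index]; for example, ['dense_1', 0, 0]
# (a made-up layer name) points at the first output tensor of the first inbound
# node of the layer named 'dense_1'.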
def connect_ancillary_layers(model, created_layers):
"""Adds layers that are not connected to the outputs to the model."""
# Layers not connected to outputs, such as those added in `add_loss`.
ancillary_layers = [
layer for layer in created_layers.values() if layer not in model.layers
]
if ancillary_layers:
relevant_nodes = nest.flatten([
layer.inbound_nodes[1:]
if _should_skip_first_node(layer) else layer.inbound_nodes
for layer in created_layers.values()
])
model._insert_layers(ancillary_layers, relevant_nodes)
return model
def reconstruct_from_config(config, custom_objects=None, created_layers=None):
"""Reconstructs graph from config object.
Args:
config: Dictionary returned from Network.get_config()
custom_objects: Optional dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization.
created_layers: Optional dictionary mapping names to Layer objects. Any
      layer not in this dictionary will be created and added to the dict.
This function will add new nodes to all layers (excluding InputLayers),
instead of re-using pre-existing nodes in the layers.
Returns:
Tuple of (input tensors, output tensors, dictionary of created layers)
"""
# Layer instances created during the graph reconstruction process.
created_layers = created_layers or collections.OrderedDict()
# Maps input data (tuple of inbound layer name, node index) from the config
# to node indices in the newly generated model. The node indices may be
# different if the layers have already been called previously.
node_index_map = {}
node_count_by_layer = {}
# Dictionary mapping layer instances to
# node data that specifies a layer call.
# It acts as a queue that maintains any unprocessed
# layer call until it becomes possible to process it
# (i.e. until the input tensors to the call all exist).
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def get_node_index(layer, config_node_index):
"""Returns node index in layer (might differ from config_node_index)."""
if isinstance(layer, input_layer_module.InputLayer):
return 0
return node_index_map.get((layer.name, config_node_index), None)
def process_node(layer, node_data):
"""Deserialize a node.
Arguments:
layer: layer instance.
node_data: Nested structure of `ListWrapper`.
Raises:
ValueError: In case of improperly formatted `node_data`.
"""
input_tensors = []
for input_data in nest.flatten(node_data):
input_data = input_data.as_list()
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
kwargs = _deserialize_keras_tensors(kwargs, created_layers)
else:
raise ValueError('Improperly formatted model config.')
inbound_layer = created_layers[inbound_layer_name]
inbound_node_index = get_node_index(inbound_layer, inbound_node_index)
if inbound_node_index is None:
add_unprocessed_node(layer, node_data)
return
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
nest.flatten(inbound_node.outputs)[inbound_tensor_index])
input_tensors = nest.pack_sequence_as(node_data, input_tensors)
# Call layer on its inputs, thus creating the node
# and building the layer if needed.
if input_tensors is not None:
input_tensors = base_layer_utils.unnest_if_single_tensor(input_tensors)
output_tensors = layer(input_tensors, **kwargs)
# Update node index map.
output_index = nest.flatten(output_tensors)[0]._keras_history.node_index
node_index_map[(layer.name, node_count_by_layer[layer])] = output_index
node_count_by_layer[layer] += 1
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
Arguments:
layer_data: layer config dict.
Raises:
ValueError: In case of improperly formatted `layer_data` dict.
"""
layer_name = layer_data['name']
if layer_name in created_layers:
layer = created_layers[layer_name]
else:
# Instantiate layer.
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
layer = deserialize_layer(layer_data, custom_objects=custom_objects)
created_layers[layer_name] = layer
node_count_by_layer[layer] = int(_should_skip_first_node(layer))
# Gather layer inputs and convert to `ListWrapper` objects.
inbound_nodes_data = layer_data['inbound_nodes']
inbound_nodes_data = tf_utils.convert_inner_node_data(
inbound_nodes_data, wrap=True)
for node_data in inbound_nodes_data:
# We don't process nodes (i.e. make layer calls)
# on the fly because the inbound node may not yet exist,
# in case of layer shared at different topological depths
# (e.g. a model such as A(B(A(B(x)))))
add_unprocessed_node(layer, node_data)
# First, we create all layers and enqueue nodes to be processed
for layer_data in config['layers']:
process_layer(layer_data)
# Then we process nodes in order of layer depth.
# Nodes that cannot yet be processed (if the inbound node
# does not yet exist) are re-enqueued, and the process
# is repeated until all nodes are processed.
while unprocessed_nodes:
for layer_data in config['layers']:
layer = created_layers[layer_data['name']]
if layer in unprocessed_nodes:
for node_data in unprocessed_nodes.pop(layer):
process_node(layer, node_data)
input_tensors = []
output_tensors = []
input_layers = tf_utils.convert_inner_node_data(
config['input_layers'], wrap=True)
for layer_data in nest.flatten(input_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
node_index = get_node_index(layer, node_index)
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
output_layers = tf_utils.convert_inner_node_data(
config['output_layers'], wrap=True)
for layer_data in nest.flatten(output_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
node_index = get_node_index(layer, node_index)
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
input_tensors = nest.pack_sequence_as(input_layers, input_tensors)
output_tensors = nest.pack_sequence_as(output_layers, output_tensors)
return input_tensors, output_tensors, created_layers
def get_network_config(network, serialize_layer_fn=None):
"""Builds the config, which consists of the node graph and serialized layers.
Args:
network: A Network object.
serialize_layer_fn: Function used to serialize layers.
Returns:
Config dictionary.
"""
serialize_layer_fn = (
serialize_layer_fn or generic_utils.serialize_keras_object)
config = {
'name': network.name,
}
node_conversion_map = {}
for layer in network.layers:
kept_nodes = 1 if _should_skip_first_node(layer) else 0
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in network._network_nodes:
node_conversion_map[node_key] = kept_nodes
kept_nodes += 1
layer_configs = []
for layer in network.layers: # From the earliest layers on.
filtered_inbound_nodes = []
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in network._network_nodes and not node.is_input:
# The node is relevant to the model:
# add to filtered_inbound_nodes.
node_data = node.serialize(_make_node_key, node_conversion_map)
filtered_inbound_nodes.append(node_data)
layer_config = serialize_layer_fn(layer)
layer_config['name'] = layer.name
layer_config['inbound_nodes'] = filtered_inbound_nodes
layer_configs.append(layer_config)
config['layers'] = layer_configs
# Gather info about inputs and outputs.
model_inputs = []
for i in range(len(network._input_layers)):
layer, node_index, tensor_index = network._input_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in network._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_inputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_inputs = nest.pack_sequence_as(network._nested_inputs, model_inputs)
# Preserve external Keras compat for Models with single input.
if not nest.is_sequence(model_inputs):
model_inputs = [model_inputs]
model_inputs = tf_utils.convert_inner_node_data(model_inputs)
config['input_layers'] = model_inputs
model_outputs = []
for i in range(len(network._output_layers)):
layer, node_index, tensor_index = network._output_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in network._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_outputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_outputs = nest.pack_sequence_as(network._nested_outputs, model_outputs)
# Preserve external Keras compat for Models with single output.
if not nest.is_sequence(model_outputs):
model_outputs = [model_outputs]
model_outputs = tf_utils.convert_inner_node_data(model_outputs)
config['output_layers'] = model_outputs
return config
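# Hedged usage sketch (not part of the original module): the round trip these
# two helpers support, assuming `model` is an already-built functional Model.
#
#   config = get_network_config(model)
#   inputs, outputs, created_layers = reconstruct_from_config(config)
#   # A new functional Model can then be built from `inputs` and `outputs`.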
| 40.01171 | 120 | 0.700478 |
0d2e4d029636577adc74784d9a8b3494b94dc67d | 7,565 | py | Python | tensorflow/compiler/tests/adam_test.py | awan-10/tensorflow | 9d8b58a8074a5bdc152cd5a2a9260ccb72eaef90 | ["Apache-2.0"] | 5 | 2018-10-20T03:54:49.000Z | 2021-01-02T07:19:53.000Z | tensorflow/compiler/tests/adam_test.py | liufengdb/tensorflow | 51100a8de57ef53e36a8a9f5a9829cbd33fbed04 | ["Apache-2.0"] | null | null | null | tensorflow/compiler/tests/adam_test.py | liufengdb/tensorflow | 51100a8de57ef53e36a8a9f5a9829cbd33fbed04 | ["Apache-2.0"] | 2 | 2019-02-07T15:33:54.000Z | 2019-03-29T17:44:31.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
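# Hedged worked example (not part of the original test): one step of the update
# above with the default hyperparameters, matching the first iteration of the
# tests below (t=1, g_t=0.1, m=v=0):
#   alpha_t = 0.001 * sqrt(1 - 0.999**1) / (1 - 0.9**1) ~= 0.000316
#   m_1     = 0.9 * 0 + 0.1 * 0.1                        = 0.01
#   v_1     = 0.999 * 0 + 0.001 * 0.1 * 0.1              = 1e-5
#   param_1 = param - alpha_t * m_1 / (sqrt(v_1) + 1e-8) ~= param - 0.001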
class AdamOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype == np.float16:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTensorLearningRate(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype == np.float16:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype == np.float16:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
if t % 2 == 0:
update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
else:
update2.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
if __name__ == "__main__":
test.main()
| 40.672043 | 80 | 0.648249 |
588b7750ae37357dc41aff94901a13379845a549 | 267 | py | Python | exercises/11_Matrix_games/.ipynb_checkpoints/problem_instances-checkpoint.py | kiwomuc/optimization-for-DS-lecture | 43ea50ef85f73b5bbc7659e8c457218ae136bb94 | ["CC0-1.0"] | 5 | 2021-10-03T14:40:28.000Z | 2022-01-20T15:34:36.000Z | exercises/11_Matrix_games/.ipynb_checkpoints/problem_instances-checkpoint.py | kiwomuc/optimization-for-DS-lecture | 43ea50ef85f73b5bbc7659e8c457218ae136bb94 | ["CC0-1.0"] | 10 | 2021-10-21T13:02:40.000Z | 2022-02-06T19:50:32.000Z | exercises/11_Matrix_games/.ipynb_checkpoints/problem_instances-checkpoint.py | kiwomuc/optimization-for-DS-lecture | 43ea50ef85f73b5bbc7659e8c457218ae136bb94 | ["CC0-1.0"] | 12 | 2021-10-05T21:47:52.000Z | 2022-02-04T15:38:30.000Z |
import numpy as np
def policeman_and_burglar_matrix(n, th=0.8):
    # Random non-negative weights for the n positions.
    w = np.abs(np.random.randn(n))
    # Pairwise distances |i - j| between positions.
    C = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            C[i, j] = np.abs(i - j)
    # Payoff matrix: th controls how quickly the payoff saturates with distance.
    A = w * (1 - np.exp(-th * C))
    return A
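# Hedged usage sketch (not part of the original file): entry (i, j) equals
# w[j] * (1 - exp(-th * |i - j|)), so the matrix is non-negative with a zero
# diagonal.
#   A = policeman_and_burglar_matrix(5)
#   A.shape        # (5, 5)
#   A.min() >= 0   # True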
| 24.272727 | 44 | 0.513109 |
b8c4806999d77e838f7310c6b9cf61d1e19590dc | 734 | py | Python | Artificial Intelligence/Bot Building/BotClean/Solution.py | RaulButuc/SolvedProblemsArchive | df8695127f2204c4924e6ca0f14bcf362376bfa7 | ["MIT"] | 14 | 2017-02-09T12:54:37.000Z | 2020-07-08T00:23:13.000Z | Artificial Intelligence/Bot Building/BotClean/Solution.py | RaulButuc/SolvedProblemsArchive | df8695127f2204c4924e6ca0f14bcf362376bfa7 | ["MIT"] | null | null | null | Artificial Intelligence/Bot Building/BotClean/Solution.py | RaulButuc/SolvedProblemsArchive | df8695127f2204c4924e6ca0f14bcf362376bfa7 | ["MIT"] | 5 | 2020-02-22T20:55:21.000Z | 2020-07-08T00:19:51.000Z |
#!/usr/bin/python
def manhattan_dist(x1, y1, x2, y2):
return (abs(x1 - x2) + abs(y1 - y2), x2, y2)
def next_move(x, y, board, dirty):
dist = [manhattan_dist(x, y, d[0], d[1]) for d in dirty]
dist.sort()
dx = dist[0][1]
dy = dist[0][2]
    if x == dx and y == dy:
        print("CLEAN")
    elif x < dx:
        print("DOWN")
    elif x > dx:
        print("UP")
    elif y < dy:
        print("RIGHT")
    elif y > dy:
        print("LEFT")
if __name__ == "__main__":
    pos = [int(i) for i in input().strip().split()]
    board = [[j for j in input().strip()] for i in range(5)]
dirty = [(i, j) for i in range(5) for j in range(5) if board[i][j] == 'd']
next_move(pos[0], pos[1], board, dirty)
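# Hedged worked example (not part of the original file): with the bot at (0, 0)
# and dirty cells at (2, 3) and (4, 0), manhattan_dist yields (5, 2, 3) and
# (4, 4, 0); sorting picks (4, 4, 0), so next_move prints "DOWN".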
| 27.185185 | 78 | 0.523161 |
c67413f7d39f02fa8d7fcca67ff0e8cdd94cf247 | 31,329 | py | Python | cloudshell/snmp/mibs/SNMPv2-MIB.py | QualiSystems/cloudshell-snmp | 9f8c4a927997d69cf85cac2a9bb1c36952d62d99 | ["Apache-2.0"] | null | null | null | cloudshell/snmp/mibs/SNMPv2-MIB.py | QualiSystems/cloudshell-snmp | 9f8c4a927997d69cf85cac2a9bb1c36952d62d99 | ["Apache-2.0"] | 36 | 2016-05-13T08:42:13.000Z | 2021-07-07T13:53:23.000Z | cloudshell/snmp/mibs/SNMPv2-MIB.py | QualiSystems/cloudshell-snmp | 9f8c4a927997d69cf85cac2a9bb1c36952d62d99 | ["Apache-2.0"] | 5 | 2016-08-05T17:49:21.000Z | 2019-05-28T03:27:22.000Z |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pysnmp.sf.net/license.html
#
# PySNMP MIB module SNMPv2-MIB (http://pysnmp.sf.net)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/SNMPv2-MIB
# Produced by pysmi-0.1.3 at Tue Apr 18 00:52:45 2017
# On host grommit.local platform Darwin version 16.4.0 by user ilya
# Using Python version 3.4.2 (v3.4.2:ab2c023a9432, Oct 5 2014, 20:42:22)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
mib_2, snmpModules, ModuleIdentity, Counter64, ObjectIdentity, Integer32, NotificationType, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Bits, IpAddress, Gauge32, Unsigned32, TimeTicks, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "mib-2", "snmpModules", "ModuleIdentity", "Counter64", "ObjectIdentity", "Integer32", "NotificationType", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Bits", "IpAddress", "Gauge32", "Unsigned32", "TimeTicks", "MibIdentifier")
TextualConvention, TestAndIncr, TimeStamp, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TestAndIncr", "TimeStamp", "DisplayString")
snmpMIB = ModuleIdentity((1, 3, 6, 1, 6, 3, 1))
if mibBuilder.loadTexts: snmpMIB.setRevisions(('2000-08-09 20:17', '1995-11-09 00:00', '1993-04-01 00:00',))
if mibBuilder.loadTexts: snmpMIB.setLastUpdated('200008092017Z')
if mibBuilder.loadTexts: snmpMIB.setOrganization('IETF SNMPv3 Working Group')
if mibBuilder.loadTexts: snmpMIB.setContactInfo('WG-EMail: snmpv3@tis.com Subscribe: majordomo@tis.com In message body: subscribe snmpv3 Chair: Russ Mundy TIS Labs at Network Associates postal: 3060 Washington Rd Glenwood MD 21738 USA EMail: mundy@tislabs.com phone: +1 301 854-6889 Editor: Randy Presuhn BMC Software, Inc. postal: 2141 North First Street San Jose, CA 95131 USA EMail: randy_presuhn@bmc.com phone: +1 408 546-1006')
if mibBuilder.loadTexts: snmpMIB.setDescription('The MIB module for SNMP entities.')
snmpMIBObjects = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 1))
system = MibIdentifier((1, 3, 6, 1, 2, 1, 1))
sysDescr = MibScalar((1, 3, 6, 1, 2, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysDescr.setStatus('current')
if mibBuilder.loadTexts: sysDescr.setDescription("A textual description of the base_entity. This value should include the full name and version identification of the system's hardware type, software operating-system, and networking software.")
sysObjectID = MibScalar((1, 3, 6, 1, 2, 1, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysObjectID.setStatus('current')
if mibBuilder.loadTexts: sysObjectID.setDescription("The vendor's authoritative identification of the network management subsystem contained in the base_entity. This value is allocated within the SMI enterprises subtree (1.3.6.1.4.1) and provides an easy and unambiguous means for determining `what kind of box' is being managed. For example, if vendor `Flintstones, Inc.' was assigned the subtree 1.3.6.1.4.1.424242, it could assign the identifier 1.3.6.1.4.1.424242.1.1 to its `Fred Router'.")
sysUpTime = MibScalar((1, 3, 6, 1, 2, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysUpTime.setStatus('current')
if mibBuilder.loadTexts: sysUpTime.setDescription('The time (in hundredths of a second) since the network management portion of the system was last re-initialized.')
sysContact = MibScalar((1, 3, 6, 1, 2, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysContact.setStatus('current')
if mibBuilder.loadTexts: sysContact.setDescription('The textual identification of the contact person for this managed node, together with information on how to contact this person. If no contact information is known, the value is the zero-length string.')
sysName = MibScalar((1, 3, 6, 1, 2, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysName.setStatus('current')
if mibBuilder.loadTexts: sysName.setDescription("An administratively-assigned name for this managed node. By convention, this is the node's fully-qualified domain name. If the name is unknown, the value is the zero-length string.")
sysLocation = MibScalar((1, 3, 6, 1, 2, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysLocation.setStatus('current')
if mibBuilder.loadTexts: sysLocation.setDescription("The physical location of this node (e.g., 'telephone closet, 3rd floor'). If the location is unknown, the value is the zero-length string.")
sysServices = MibScalar((1, 3, 6, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysServices.setStatus('current')
if mibBuilder.loadTexts: sysServices.setDescription('A value which indicates the set of services that this base_entity may potentially offer. The value is a sum. This sum initially takes the value zero. Then, for each layer, L, in the range 1 through 7, that this node performs transactions for, 2 raised to (L - 1) is added to the sum. For example, a node which performs only routing functions would have a value of 4 (2^(3-1)). In contrast, a node which is a host offering application services would have a value of 72 (2^(4-1) + 2^(7-1)). Note that in the context of the Internet suite of protocols, values should be calculated accordingly: layer functionality 1 physical (e.g., repeaters) 2 datalink/subnetwork (e.g., bridges) 3 internet (e.g., supports the IP) 4 end-to-end (e.g., supports the TCP) 7 applications (e.g., supports the SMTP) For systems including OSI protocols, layers 5 and 6 may also be counted.')
sysORLastChange = MibScalar((1, 3, 6, 1, 2, 1, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysORLastChange.setStatus('current')
if mibBuilder.loadTexts: sysORLastChange.setDescription('The value of sysUpTime at the time of the most recent change in state or value of any instance of sysORID.')
sysORTable = MibTable((1, 3, 6, 1, 2, 1, 1, 9), )
if mibBuilder.loadTexts: sysORTable.setStatus('current')
if mibBuilder.loadTexts: sysORTable.setDescription('The (conceptual) table listing the capabilities of the local SNMP application acting as a command responder with respect to various MIB modules. SNMP entities having dynamically-configurable support of MIB modules will have a dynamically-varying number of conceptual rows.')
sysOREntry = MibTableRow((1, 3, 6, 1, 2, 1, 1, 9, 1), ).setIndexNames((0, "SNMPv2-MIB", "sysORIndex"))
if mibBuilder.loadTexts: sysOREntry.setStatus('current')
if mibBuilder.loadTexts: sysOREntry.setDescription('An entry (conceptual row) in the sysORTable.')
sysORIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: sysORIndex.setStatus('current')
if mibBuilder.loadTexts: sysORIndex.setDescription('The auxiliary variable used for identifying instances of the columnar objects in the sysORTable.')
sysORID = MibTableColumn((1, 3, 6, 1, 2, 1, 1, 9, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysORID.setStatus('current')
if mibBuilder.loadTexts: sysORID.setDescription('An authoritative identification of a capabilities statement with respect to various MIB modules supported by the local SNMP application acting as a command responder.')
sysORDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 1, 9, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysORDescr.setStatus('current')
if mibBuilder.loadTexts: sysORDescr.setDescription('A textual description of the capabilities identified by the corresponding instance of sysORID.')
sysORUpTime = MibTableColumn((1, 3, 6, 1, 2, 1, 1, 9, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysORUpTime.setStatus('current')
if mibBuilder.loadTexts: sysORUpTime.setDescription('The value of sysUpTime at the time this conceptual row was last instantiated.')
snmp = MibIdentifier((1, 3, 6, 1, 2, 1, 11))
snmpInPkts = MibScalar((1, 3, 6, 1, 2, 1, 11, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInPkts.setStatus('current')
if mibBuilder.loadTexts: snmpInPkts.setDescription('The total number of messages delivered to the SNMP base_entity from the transport service.')
snmpInBadVersions = MibScalar((1, 3, 6, 1, 2, 1, 11, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInBadVersions.setStatus('current')
if mibBuilder.loadTexts: snmpInBadVersions.setDescription('The total number of SNMP messages which were delivered to the SNMP base_entity and were for an unsupported SNMP version.')
snmpInBadCommunityNames = MibScalar((1, 3, 6, 1, 2, 1, 11, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInBadCommunityNames.setStatus('current')
if mibBuilder.loadTexts: snmpInBadCommunityNames.setDescription('The total number of community-based SNMP messages (for example, SNMPv1) delivered to the SNMP base_entity which used an SNMP community name not known to said base_entity. Also, implementations which authenticate community-based SNMP messages using check(s) in addition to matching the community name (for example, by also checking whether the message originated from a transport address allowed to use a specified community name) MAY include in this value the number of messages which failed the additional check(s). It is strongly RECOMMENDED that the documentation for any security model which is used to authenticate community-based SNMP messages specify the precise conditions that contribute to this value.')
snmpInBadCommunityUses = MibScalar((1, 3, 6, 1, 2, 1, 11, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInBadCommunityUses.setStatus('current')
if mibBuilder.loadTexts: snmpInBadCommunityUses.setDescription('The total number of community-based SNMP messages (for example, SNMPv1) delivered to the SNMP base_entity which represented an SNMP operation that was not allowed for the SNMP community named in the message. The precise conditions under which this counter is incremented (if at all) depend on how the SNMP base_entity implements its access control mechanism and how its applications interact with that access control mechanism. It is strongly RECOMMENDED that the documentation for any access control mechanism which is used to control access to and visibility of MIB instrumentation specify the precise conditions that contribute to this value.')
snmpInASNParseErrs = MibScalar((1, 3, 6, 1, 2, 1, 11, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInASNParseErrs.setStatus('current')
if mibBuilder.loadTexts: snmpInASNParseErrs.setDescription('The total number of ASN.1 or BER errors encountered by the SNMP base_entity when decoding received SNMP messages.')
snmpEnableAuthenTraps = MibScalar((1, 3, 6, 1, 2, 1, 11, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpEnableAuthenTraps.setStatus('current')
if mibBuilder.loadTexts: snmpEnableAuthenTraps.setDescription('Indicates whether the SNMP base_entity is permitted to generate authenticationFailure traps. The value of this object overrides any configuration information; as such, it provides a means whereby all authenticationFailure traps may be disabled. Note that it is strongly recommended that this object be stored in non-volatile memory so that it remains constant across re-initializations of the network management system.')
snmpSilentDrops = MibScalar((1, 3, 6, 1, 2, 1, 11, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpSilentDrops.setStatus('current')
if mibBuilder.loadTexts: snmpSilentDrops.setDescription('The total number of Confirmed Class PDUs (such as GetRequest-PDUs, GetNextRequest-PDUs, GetBulkRequest-PDUs, SetRequest-PDUs, and InformRequest-PDUs) delivered to the SNMP base_entity which were silently dropped because the size of a reply containing an alternate Response Class PDU (such as a Response-PDU) with an empty variable-bindings field was greater than either a local constraint or the maximum message size associated with the originator of the request.')
snmpProxyDrops = MibScalar((1, 3, 6, 1, 2, 1, 11, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpProxyDrops.setStatus('current')
if mibBuilder.loadTexts: snmpProxyDrops.setDescription('The total number of Confirmed Class PDUs (such as GetRequest-PDUs, GetNextRequest-PDUs, GetBulkRequest-PDUs, SetRequest-PDUs, and InformRequest-PDUs) delivered to the SNMP base_entity which were silently dropped because the transmission of the (possibly translated) message to a proxy target failed in a manner (other than a time-out) such that no Response Class PDU (such as a Response-PDU) could be returned.')
snmpTrap = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 1, 4))
snmpTrapOID = MibScalar((1, 3, 6, 1, 6, 3, 1, 1, 4, 1), ObjectIdentifier()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: snmpTrapOID.setStatus('current')
if mibBuilder.loadTexts: snmpTrapOID.setDescription('The authoritative identification of the notification currently being sent. This variable occurs as the second varbind in every SNMPv2-Trap-PDU and InformRequest-PDU.')
snmpTrapEnterprise = MibScalar((1, 3, 6, 1, 6, 3, 1, 1, 4, 3), ObjectIdentifier()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: snmpTrapEnterprise.setStatus('current')
if mibBuilder.loadTexts: snmpTrapEnterprise.setDescription('The authoritative identification of the enterprise associated with the trap currently being sent. When an SNMP proxy agent is mapping an RFC1157 Trap-PDU into a SNMPv2-Trap-PDU, this variable occurs as the last varbind.')
snmpTraps = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 1, 5))
coldStart = NotificationType((1, 3, 6, 1, 6, 3, 1, 1, 5, 1)).setObjects()
if mibBuilder.loadTexts: coldStart.setStatus('current')
if mibBuilder.loadTexts: coldStart.setDescription('A coldStart trap signifies that the SNMP base_entity, supporting a notification originator application, is reinitializing itself and that its configuration may have been altered.')
warmStart = NotificationType((1, 3, 6, 1, 6, 3, 1, 1, 5, 2)).setObjects()
if mibBuilder.loadTexts: warmStart.setStatus('current')
if mibBuilder.loadTexts: warmStart.setDescription('A warmStart trap signifies that the SNMP base_entity, supporting a notification originator application, is reinitializing itself such that its configuration is unaltered.')
authenticationFailure = NotificationType((1, 3, 6, 1, 6, 3, 1, 1, 5, 5)).setObjects()
if mibBuilder.loadTexts: authenticationFailure.setStatus('current')
if mibBuilder.loadTexts: authenticationFailure.setDescription('An authenticationFailure trap signifies that the SNMP base_entity has received a protocol message that is not properly authenticated. While all implementations of SNMP entities MAY be capable of generating this trap, the snmpEnableAuthenTraps object indicates whether this trap will be generated.')
snmpSet = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 1, 6))
snmpSetSerialNo = MibScalar((1, 3, 6, 1, 6, 3, 1, 1, 6, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpSetSerialNo.setStatus('current')
if mibBuilder.loadTexts: snmpSetSerialNo.setDescription('An advisory lock used to allow several cooperating command generator applications to coordinate their use of the SNMP set operation. This object is used for coarse-grain coordination. To achieve fine-grain coordination, one or more similar objects might be defined within each MIB group, as appropriate.')
snmpMIBConformance = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 2))
snmpMIBCompliances = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 2, 1))
snmpMIBGroups = MibIdentifier((1, 3, 6, 1, 6, 3, 1, 2, 2))
snmpBasicCompliance = ModuleCompliance((1, 3, 6, 1, 6, 3, 1, 2, 1, 2)).setObjects(("SNMPv2-MIB", "snmpGroup"), ("SNMPv2-MIB", "snmpSetGroup"), ("SNMPv2-MIB", "systemGroup"), ("SNMPv2-MIB", "snmpBasicNotificationsGroup"), ("SNMPv2-MIB", "snmpCommunityGroup"))
if mibBuilder.loadTexts: snmpBasicCompliance.setDescription('The compliance statement for SNMPv2 entities which implement the SNMPv2 MIB. This compliance statement is replaced by snmpBasicComplianceRev2.')
snmpBasicComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 6, 3, 1, 2, 1, 3)).setObjects(("SNMPv2-MIB", "snmpGroup"), ("SNMPv2-MIB", "snmpSetGroup"), ("SNMPv2-MIB", "systemGroup"), ("SNMPv2-MIB", "snmpBasicNotificationsGroup"), ("SNMPv2-MIB", "snmpCommunityGroup"), ("SNMPv2-MIB", "snmpWarmStartNotificationGroup"))
if mibBuilder.loadTexts: snmpBasicComplianceRev2.setDescription('The compliance statement for SNMP entities which implement this MIB module.')
snmpGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 8)).setObjects(("SNMPv2-MIB", "snmpInPkts"), ("SNMPv2-MIB", "snmpInBadVersions"), ("SNMPv2-MIB", "snmpInASNParseErrs"), ("SNMPv2-MIB", "snmpSilentDrops"), ("SNMPv2-MIB", "snmpProxyDrops"), ("SNMPv2-MIB", "snmpEnableAuthenTraps"))
if mibBuilder.loadTexts: snmpGroup.setDescription('A collection of objects providing basic instrumentation and control of an SNMP base_entity.')
snmpCommunityGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 9)).setObjects(("SNMPv2-MIB", "snmpInBadCommunityNames"), ("SNMPv2-MIB", "snmpInBadCommunityUses"))
if mibBuilder.loadTexts: snmpCommunityGroup.setDescription('A collection of objects providing basic instrumentation of a SNMP base_entity which supports community-based authentication.')
snmpSetGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 5)).setObjects(("SNMPv2-MIB", "snmpSetSerialNo"))
if mibBuilder.loadTexts: snmpSetGroup.setDescription('A collection of objects which allow several cooperating command generator applications to coordinate their use of the set operation.')
systemGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 6)).setObjects(("SNMPv2-MIB", "sysDescr"), ("SNMPv2-MIB", "sysObjectID"), ("SNMPv2-MIB", "sysUpTime"), ("SNMPv2-MIB", "sysContact"), ("SNMPv2-MIB", "sysName"), ("SNMPv2-MIB", "sysLocation"), ("SNMPv2-MIB", "sysServices"), ("SNMPv2-MIB", "sysORLastChange"), ("SNMPv2-MIB", "sysORID"), ("SNMPv2-MIB", "sysORUpTime"), ("SNMPv2-MIB", "sysORDescr"))
if mibBuilder.loadTexts: systemGroup.setDescription('The system group defines objects which are common to all managed systems.')
snmpBasicNotificationsGroup = NotificationGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 7)).setObjects(("SNMPv2-MIB", "coldStart"), ("SNMPv2-MIB", "authenticationFailure"))
if mibBuilder.loadTexts: snmpBasicNotificationsGroup.setDescription('The basic notifications implemented by an SNMP base_entity supporting command responder applications.')
snmpWarmStartNotificationGroup = NotificationGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 11)).setObjects(("SNMPv2-MIB", "warmStart"))
if mibBuilder.loadTexts: snmpWarmStartNotificationGroup.setDescription('An additional notification for an SNMP base_entity supporting command responder applications, if it is able to reinitialize itself such that its configuration is unaltered.')
snmpNotificationGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 12)).setObjects(("SNMPv2-MIB", "snmpTrapOID"), ("SNMPv2-MIB", "snmpTrapEnterprise"))
if mibBuilder.loadTexts: snmpNotificationGroup.setDescription('These objects are required for entities which support notification originator applications.')
snmpOutPkts = MibScalar((1, 3, 6, 1, 2, 1, 11, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutPkts.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutPkts.setDescription('The total number of SNMP Messages which were passed from the SNMP protocol base_entity to the transport service.')
snmpInTooBigs = MibScalar((1, 3, 6, 1, 2, 1, 11, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInTooBigs.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInTooBigs.setDescription("The total number of SNMP PDUs which were delivered to the SNMP protocol base_entity and for which the value of the error-status field was `tooBig'.")
snmpInNoSuchNames = MibScalar((1, 3, 6, 1, 2, 1, 11, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInNoSuchNames.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInNoSuchNames.setDescription("The total number of SNMP PDUs which were delivered to the SNMP protocol base_entity and for which the value of the error-status field was `noSuchName'.")
snmpInBadValues = MibScalar((1, 3, 6, 1, 2, 1, 11, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInBadValues.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInBadValues.setDescription("The total number of SNMP PDUs which were delivered to the SNMP protocol base_entity and for which the value of the error-status field was `badValue'.")
snmpInReadOnlys = MibScalar((1, 3, 6, 1, 2, 1, 11, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInReadOnlys.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInReadOnlys.setDescription("The total number valid SNMP PDUs which were delivered to the SNMP protocol base_entity and for which the value of the error-status field was `readOnly'. It should be noted that it is a protocol error to generate an SNMP PDU which contains the value `readOnly' in the error-status field, as such this object is provided as a means of detecting incorrect implementations of the SNMP.")
snmpInGenErrs = MibScalar((1, 3, 6, 1, 2, 1, 11, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInGenErrs.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInGenErrs.setDescription("The total number of SNMP PDUs which were delivered to the SNMP protocol base_entity and for which the value of the error-status field was `genErr'.")
snmpInTotalReqVars = MibScalar((1, 3, 6, 1, 2, 1, 11, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInTotalReqVars.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInTotalReqVars.setDescription('The total number of MIB objects which have been retrieved successfully by the SNMP protocol base_entity as the result of receiving valid SNMP Get-Request and Get-Next PDUs.')
snmpInTotalSetVars = MibScalar((1, 3, 6, 1, 2, 1, 11, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInTotalSetVars.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInTotalSetVars.setDescription('The total number of MIB objects which have been altered successfully by the SNMP protocol base_entity as the result of receiving valid SNMP Set-Request PDUs.')
snmpInGetRequests = MibScalar((1, 3, 6, 1, 2, 1, 11, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInGetRequests.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInGetRequests.setDescription('The total number of SNMP Get-Request PDUs which have been accepted and processed by the SNMP protocol base_entity.')
snmpInGetNexts = MibScalar((1, 3, 6, 1, 2, 1, 11, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInGetNexts.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInGetNexts.setDescription('The total number of SNMP Get-Next PDUs which have been accepted and processed by the SNMP protocol base_entity.')
snmpInSetRequests = MibScalar((1, 3, 6, 1, 2, 1, 11, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInSetRequests.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInSetRequests.setDescription('The total number of SNMP Set-Request PDUs which have been accepted and processed by the SNMP protocol base_entity.')
snmpInGetResponses = MibScalar((1, 3, 6, 1, 2, 1, 11, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInGetResponses.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInGetResponses.setDescription('The total number of SNMP Get-Response PDUs which have been accepted and processed by the SNMP protocol base_entity.')
snmpInTraps = MibScalar((1, 3, 6, 1, 2, 1, 11, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInTraps.setStatus('obsolete')
if mibBuilder.loadTexts: snmpInTraps.setDescription('The total number of SNMP Trap PDUs which have been accepted and processed by the SNMP protocol base_entity.')
snmpOutTooBigs = MibScalar((1, 3, 6, 1, 2, 1, 11, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutTooBigs.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutTooBigs.setDescription("The total number of SNMP PDUs which were generated by the SNMP protocol base_entity and for which the value of the error-status field was `tooBig.'")
snmpOutNoSuchNames = MibScalar((1, 3, 6, 1, 2, 1, 11, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutNoSuchNames.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutNoSuchNames.setDescription("The total number of SNMP PDUs which were generated by the SNMP protocol base_entity and for which the value of the error-status was `noSuchName'.")
snmpOutBadValues = MibScalar((1, 3, 6, 1, 2, 1, 11, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutBadValues.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutBadValues.setDescription("The total number of SNMP PDUs which were generated by the SNMP protocol base_entity and for which the value of the error-status field was `badValue'.")
snmpOutGenErrs = MibScalar((1, 3, 6, 1, 2, 1, 11, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutGenErrs.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutGenErrs.setDescription("The total number of SNMP PDUs which were generated by the SNMP protocol base_entity and for which the value of the error-status field was `genErr'.")
snmpOutGetRequests = MibScalar((1, 3, 6, 1, 2, 1, 11, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutGetRequests.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutGetRequests.setDescription('The total number of SNMP Get-Request PDUs which have been generated by the SNMP protocol base_entity.')
snmpOutGetNexts = MibScalar((1, 3, 6, 1, 2, 1, 11, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutGetNexts.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutGetNexts.setDescription('The total number of SNMP Get-Next PDUs which have been generated by the SNMP protocol base_entity.')
snmpOutSetRequests = MibScalar((1, 3, 6, 1, 2, 1, 11, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutSetRequests.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutSetRequests.setDescription('The total number of SNMP Set-Request PDUs which have been generated by the SNMP protocol base_entity.')
snmpOutGetResponses = MibScalar((1, 3, 6, 1, 2, 1, 11, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutGetResponses.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutGetResponses.setDescription('The total number of SNMP Get-Response PDUs which have been generated by the SNMP protocol base_entity.')
snmpOutTraps = MibScalar((1, 3, 6, 1, 2, 1, 11, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutTraps.setStatus('obsolete')
if mibBuilder.loadTexts: snmpOutTraps.setDescription('The total number of SNMP Trap PDUs which have been generated by the SNMP protocol base_entity.')
snmpObsoleteGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 1, 2, 2, 10)).setObjects(("SNMPv2-MIB", "snmpOutPkts"), ("SNMPv2-MIB", "snmpInTooBigs"), ("SNMPv2-MIB", "snmpInNoSuchNames"), ("SNMPv2-MIB", "snmpInBadValues"), ("SNMPv2-MIB", "snmpInReadOnlys"), ("SNMPv2-MIB", "snmpInGenErrs"), ("SNMPv2-MIB", "snmpInTotalReqVars"), ("SNMPv2-MIB", "snmpInTotalSetVars"), ("SNMPv2-MIB", "snmpInGetRequests"), ("SNMPv2-MIB", "snmpInGetNexts"), ("SNMPv2-MIB", "snmpInSetRequests"), ("SNMPv2-MIB", "snmpInGetResponses"), ("SNMPv2-MIB", "snmpInTraps"), ("SNMPv2-MIB", "snmpOutTooBigs"), ("SNMPv2-MIB", "snmpOutNoSuchNames"), ("SNMPv2-MIB", "snmpOutBadValues"), ("SNMPv2-MIB", "snmpOutGenErrs"), ("SNMPv2-MIB", "snmpOutGetRequests"), ("SNMPv2-MIB", "snmpOutGetNexts"), ("SNMPv2-MIB", "snmpOutSetRequests"), ("SNMPv2-MIB", "snmpOutGetResponses"), ("SNMPv2-MIB", "snmpOutTraps"))
if mibBuilder.loadTexts: snmpObsoleteGroup.setDescription('A collection of objects from RFC 1213 made obsolete by this MIB module.')
mibBuilder.exportSymbols("SNMPv2-MIB", snmpOutBadValues=snmpOutBadValues, coldStart=coldStart, snmpOutPkts=snmpOutPkts, snmpSilentDrops=snmpSilentDrops, snmpCommunityGroup=snmpCommunityGroup, sysORLastChange=sysORLastChange, sysName=sysName, snmpBasicNotificationsGroup=snmpBasicNotificationsGroup, systemGroup=systemGroup, snmpInNoSuchNames=snmpInNoSuchNames, snmpInTotalSetVars=snmpInTotalSetVars, snmpOutTooBigs=snmpOutTooBigs, snmpInBadCommunityNames=snmpInBadCommunityNames, snmpInASNParseErrs=snmpInASNParseErrs, snmpProxyDrops=snmpProxyDrops, snmpInPkts=snmpInPkts, snmpInSetRequests=snmpInSetRequests, snmpInTraps=snmpInTraps, sysORIndex=sysORIndex, snmpOutGetRequests=snmpOutGetRequests, PYSNMP_MODULE_ID=snmpMIB, snmpMIB=snmpMIB, snmpTrap=snmpTrap, sysOREntry=sysOREntry, snmp=snmp, snmpSet=snmpSet, warmStart=warmStart, snmpOutGetNexts=snmpOutGetNexts, snmpOutGetResponses=snmpOutGetResponses, snmpGroup=snmpGroup, sysLocation=sysLocation, snmpOutSetRequests=snmpOutSetRequests, snmpMIBGroups=snmpMIBGroups, snmpTrapOID=snmpTrapOID, system=system, snmpWarmStartNotificationGroup=snmpWarmStartNotificationGroup, snmpInBadCommunityUses=snmpInBadCommunityUses, snmpBasicComplianceRev2=snmpBasicComplianceRev2, sysContact=sysContact, snmpInGetNexts=snmpInGetNexts, sysORUpTime=sysORUpTime, snmpInGetResponses=snmpInGetResponses, snmpTraps=snmpTraps, snmpInGenErrs=snmpInGenErrs, snmpInReadOnlys=snmpInReadOnlys, snmpMIBCompliances=snmpMIBCompliances, snmpMIBObjects=snmpMIBObjects, snmpOutTraps=snmpOutTraps, snmpEnableAuthenTraps=snmpEnableAuthenTraps, snmpSetSerialNo=snmpSetSerialNo, snmpInTotalReqVars=snmpInTotalReqVars, snmpInBadVersions=snmpInBadVersions, snmpMIBConformance=snmpMIBConformance, sysORTable=sysORTable, sysORID=sysORID, snmpInTooBigs=snmpInTooBigs, sysORDescr=sysORDescr, sysUpTime=sysUpTime, sysDescr=sysDescr, snmpBasicCompliance=snmpBasicCompliance, snmpInGetRequests=snmpInGetRequests, snmpInBadValues=snmpInBadValues, snmpSetGroup=snmpSetGroup, sysServices=sysServices, snmpOutNoSuchNames=snmpOutNoSuchNames, sysObjectID=sysObjectID, authenticationFailure=authenticationFailure, snmpObsoleteGroup=snmpObsoleteGroup, snmpOutGenErrs=snmpOutGenErrs, snmpTrapEnterprise=snmpTrapEnterprise, snmpNotificationGroup=snmpNotificationGroup)
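# Hedged usage sketch (not part of the generated module): the symbols exported
# above are normally resolved by name through pysnmp's high-level API, e.g. a
# GET for sysDescr.0. The host and community string below are placeholders and
# exact call details may differ by pysnmp version.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#   errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#       ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0))))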
| 152.82439 | 2,264 | 0.787705 |
37046dc4b6b37ad802ca46e76bbdba1813cd77c5 | 5,127 | py | Python | mars/tensor/expressions/lib/index_tricks.py | pingrunhuang/mars | ae920c374e9844d7426d0cc09c0d97059dc5341c | ["Apache-2.0"] | 1 | 2019-09-22T16:00:48.000Z | 2019-09-22T16:00:48.000Z | mars/tensor/expressions/lib/index_tricks.py | turboFei/mars | cde691285d921add5460944764c7278e7ddec8ff | ["Apache-2.0"] | null | null | null | mars/tensor/expressions/lib/index_tricks.py | turboFei/mars | cde691285d921add5460944764c7278e7ddec8ff | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from numpy.lib.index_tricks import ndindex
from .. import datasource as _nx
class nd_grid(object):
"""
Construct a multi-dimensional "meshgrid".
``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then the
integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
If instantiated with an argument of ``sparse=True``, the mesh-grid is
open (or not fleshed out) so that only one-dimension of each returned
argument is greater than 1.
Parameters
----------
sparse : bool, optional
Whether the grid is sparse or not. Default is False.
Notes
-----
Two instances of `nd_grid` are made available in the Mars.tensor namespace,
`mgrid` and `ogrid`::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
Examples
--------
>>> import mars.tensor as mt
>>> mgrid = mt.lib.index_tricks.nd_grid()
>>> mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid = mt.lib.index_tricks.nd_grid(sparse=True)
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
"""
def __init__(self, sparse=False):
self.sparse = sparse
def __getitem__(self, key):
try:
size = []
typ = int
for k in range(len(key)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
size.append(
int(math.ceil((key[k].stop - start)/(step*1.0))))
if (isinstance(step, float) or
isinstance(start, float) or
isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
step = (key[k].stop - start)/float(step-1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [np.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
nn[k] = nn[k][slobj]
slobj[k] = np.newaxis
return nn
except (IndexError, TypeError):
step = key.step
stop = key.stop
start = key.start
if start is None:
start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
def __len__(self):
return 0
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
__all__ = ['ndindex', 'mgrid', 'ogrid']
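# Illustrative self-check (added for exposition; not part of the original
# module): the complex-step rule implemented in nd_grid.__getitem__ matches
# numpy's own mgrid behaviour -- a step of 5j means "5 points, stop inclusive",
# i.e. the same values linspace would produce over the interval.
if __name__ == '__main__':
    assert np.allclose(np.mgrid[-1:1:5j], np.linspace(-1, 1, 5))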
| 32.04375
| 79
| 0.50868
|
2ae92f194d6333af24855367620993f48d11df97
| 1,943
|
py
|
Python
|
tests/test_is_ip_v4.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | 3
|
2020-08-20T10:27:13.000Z
|
2021-11-02T20:28:16.000Z
|
tests/test_is_ip_v4.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | null | null | null |
tests/test_is_ip_v4.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from string_utils import is_ip_v4
class IsIpV4TestCase(TestCase):
def test_return_false_for_non_string_objects(self):
# noinspection PyTypeChecker
self.assertFalse(is_ip_v4(None))
# noinspection PyTypeChecker
self.assertFalse(is_ip_v4(1))
# noinspection PyTypeChecker
self.assertFalse(is_ip_v4([]))
# noinspection PyTypeChecker
self.assertFalse(is_ip_v4({'a': 1}))
# noinspection PyTypeChecker
self.assertFalse(is_ip_v4(True))
def test_ip_cannot_be_blank(self):
self.assertFalse(is_ip_v4(''))
self.assertFalse(is_ip_v4(' '))
def test_recognize_ip_strings(self):
self.assertTrue(is_ip_v4('127.0.0.1'))
self.assertTrue(is_ip_v4('0.0.0.0'))
self.assertTrue(is_ip_v4('255.255.10.1'))
def test_returns_false_if_ipv4_out_of_range(self):
self.assertFalse(is_ip_v4('999.200.100.75'))
self.assertFalse(is_ip_v4('255.999.100.75'))
self.assertFalse(is_ip_v4('255.200.999.75'))
self.assertFalse(is_ip_v4('255.200.100.999'))
def test_ip_cannot_contain_spaces(self):
self.assertFalse(is_ip_v4(' 127.0.0.1 '))
self.assertFalse(is_ip_v4('0.0.0.0 '))
self.assertFalse(is_ip_v4(' 255.255.10.1'))
self.assertFalse(is_ip_v4('255. 255.10.1'))
def test_ip_cannot_have_multiple_dots(self):
self.assertFalse(is_ip_v4('127.0.0..1'))
self.assertFalse(is_ip_v4('0..0.0.0'))
self.assertFalse(is_ip_v4('255.255.10.1.'))
def test_numbers_cannot_be_divided_by_other_signs_in_ipv4(self):
self.assertFalse(is_ip_v4('127:0:0:1'))
self.assertFalse(is_ip_v4('127-0-0-1'))
self.assertFalse(is_ip_v4('0_0_0_0'))
self.assertFalse(is_ip_v4('255,255,10,1'))
def test_ip_v6_is_not_recognized(self):
self.assertFalse(is_ip_v4('2001:db8:85a3:0:0:8a2e:370:7334'))
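# Illustrative sketch only (not the actual string_utils implementation): a
# validator consistent with the behaviours exercised above could look like the
# following; the regex and the helper name are assumptions for illustration.
import re
_IPV4_RE = re.compile(r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$')
def is_ip_v4_sketch(value):
    # reject non-strings, then require exactly four dot-separated octets in 0..255
    if not isinstance(value, str):
        return False
    match = _IPV4_RE.match(value)
    return bool(match) and all(0 <= int(part) <= 255 for part in match.groups())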
| 34.087719
| 69
| 0.670612
|
328c635251adb6622ce01ce8aa17f72e4ece2a55
| 2,755
|
py
|
Python
|
Decorator/index.py
|
edgardeng/design-patterns-in-python
|
3b18fa8eabf2f8bc20c20103cc837ea90606c2d9
|
[
"MIT"
] | null | null | null |
Decorator/index.py
|
edgardeng/design-patterns-in-python
|
3b18fa8eabf2f8bc20c20103cc837ea90606c2d9
|
[
"MIT"
] | null | null | null |
Decorator/index.py
|
edgardeng/design-patterns-in-python
|
3b18fa8eabf2f8bc20c20103cc837ea90606c2d9
|
[
"MIT"
] | null | null | null |
class Component():
"""
The base Component interface defines operations that can be altered by
decorators.
"""
def operation(self) -> str:
pass
class ConcreteComponent(Component):
"""
Concrete Components provide default implementations of the operations. There
might be several variations of these classes.
"""
def operation(self) -> str:
return "operation in ConcreteComponent"
class Decorator(Component):
"""
The base Decorator class follows the same interface as the other components.
The primary purpose of this class is to define the wrapping interface for
all concrete decorators. The default implementation of the wrapping code
might include a field for storing a wrapped component and the means to
initialize it.
"""
_component: Component = None
def __init__(self, component: Component) -> None:
self._component = component
@property
def component(self) -> str:
"""
The Decorator delegates all work to the wrapped component.
"""
return self._component
def operation(self) -> str:
return self._component.operation()
class ConcreteDecoratorA(Decorator):
"""
Concrete Decorators call the wrapped object and alter its result in some
way.
"""
def operation(self) -> str:
"""
Decorators may call parent implementation of the operation, instead of
calling the wrapped object directly. This approach simplifies extension
of decorator classes.
"""
return f"ConcreteDecorator A ({self.component.operation()})"
class ConcreteDecoratorB(Decorator):
"""
Decorators can execute their behavior either before or after the call to a
wrapped object.
"""
def operation(self) -> str:
return f"ConcreteDecorator B ({self.component.operation()})"
def client_code(component: Component) -> None:
"""
The client code works with all objects using the Component interface. This
way it can stay independent of the concrete classes of components it works
with.
"""
print(f"RESULT: {component.operation()}", end="")
# ...
if __name__ == "__main__":
# This way the client code can support both simple components...
simple = ConcreteComponent()
print("Client: I've got a simple component:")
client_code(simple)
print("\n")
# ...as well as decorated ones.
#
# Note how decorators can wrap not only simple components but the other
# decorators as well.
decorator1 = ConcreteDecoratorA(simple)
decorator2 = ConcreteDecoratorB(decorator1)
print("Client: Now I've got a decorated component:")
client_code(decorator2)
print("\n")
| 27.55
| 80
| 0.673321
|
1f776c7718e625b2898c3b050cbc547bd02696c3
| 9,022
|
py
|
Python
|
examples/swap.py
|
dpinte/pyql
|
9e5b272947b269ee07c7741d05d21675983d69b4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/swap.py
|
dpinte/pyql
|
9e5b272947b269ee07c7741d05d21675983d69b4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/swap.py
|
dpinte/pyql
|
9e5b272947b269ee07c7741d05d21675983d69b4
|
[
"BSD-3-Clause"
] | null | null | null |
""" Port of the swap example of QuantLib SWIG to PyQL.
Warning: this is work in progress and currently not working.
"""
from quantlib.indexes.euribor import Euribor6M
from quantlib.pricingengines.swap import DiscountingSwapEngine
from quantlib.settings import Settings
from quantlib.quotes import SimpleQuote
from quantlib.termstructures.yields.api import DepositRateHelper, FraRateHelper
from quantlib.termstructures.yields.api import FuturesRateHelper, SwapRateHelper
from quantlib.termstructures.yields.api import YieldTermStructure
from quantlib.termstructures.yields.api import PiecewiseYieldCurve
from quantlib.time.api import Actual360, Date, November, TARGET, Weeks, Annual
from quantlib.time.api import Months, Years, Period, ModifiedFollowing
from quantlib.time.api import Unadjusted, Thirty360, Semiannual, Schedule
from quantlib.time.api import Forward, ActualActual, ISDA
# VanillaSwap is used below but was never imported; this module path is an
# assumption based on pyql's usual layout.
from quantlib.instruments.swap import VanillaSwap
# global data
calendar = TARGET()
todaysDate = Date(6,November,2001);
Settings.instance().evaluation_date = todaysDate
settlementDate = Date(8,November,2001);
# market quotes
deposits = { (1,Weeks): 0.0382,
(1,Months): 0.0372,
(3,Months): 0.0363,
(6,Months): 0.0353,
(9,Months): 0.0348,
(1,Years): 0.0345 }
FRAs = { (3,6): 0.037125,
(6,9): 0.037125,
(9,12): 0.037125 }
futures = { Date(19,12,2001): 96.2875,
Date(20,3,2002): 96.7875,
Date(19,6,2002): 96.9875,
Date(18,9,2002): 96.6875,
Date(18,12,2002): 96.4875,
Date(19,3,2003): 96.3875,
Date(18,6,2003): 96.2875,
Date(17,9,2003): 96.0875 }
swaps = { (2,Years): 0.037125,
(3,Years): 0.0398,
(5,Years): 0.0443,
(10,Years): 0.05165,
(15,Years): 0.055175 }
# convert them to Quote objects
#for n,unit in deposits.keys():
# deposits[(n,unit)] = SimpleQuote(deposits[(n,unit)])
for n,m in FRAs.keys():
FRAs[(n,m)] = SimpleQuote(FRAs[(n,m)])
for d in futures.keys():
futures[d] = SimpleQuote(futures[d])
#for n,unit in swaps.keys():
# swaps[(n,unit)] = SimpleQuote(swaps[(n,unit)])
# build rate helpers
dayCounter = Actual360()
settlementDays = 2
depositHelpers = [ DepositRateHelper(deposits[(n,unit)],
Period(n,unit), settlementDays,
calendar, ModifiedFollowing,
False, dayCounter)
for n, unit in [(1,Weeks),(1,Months),(3,Months),
(6,Months),(9,Months),(1,Years)] ]
dayCounter = Actual360()
settlementDays = 2
fraHelpers = [ FraRateHelper(FRAs[(n,m)],
n, m, settlementDays,
calendar, ModifiedFollowing,
False, dayCounter)
for n, m in FRAs.keys() ]
dayCounter = Actual360()
months = 3
futuresHelpers = [ FuturesRateHelper(futures[d],
d, months,
calendar, ModifiedFollowing,
True, dayCounter)
for d in futures.keys() ]
settlementDays = 2
fixedLegFrequency = Annual
fixedLegTenor = Period(1,Years)
fixedLegAdjustment = Unadjusted
fixedLegDayCounter = Thirty360()
floatingLegFrequency = Semiannual
floatingLegTenor = Period(6,Months)
floatingLegAdjustment = ModifiedFollowing
swapHelpers = [ SwapRateHelper.from_tenor(swaps[(n,unit)],
Period(n,unit), calendar,
fixedLegFrequency, fixedLegAdjustment,
fixedLegDayCounter, Euribor6M())
for n, unit in swaps.keys() ]
### Curve building
ts_daycounter = ActualActual(ISDA)
# term-structure construction
helpers = depositHelpers + swapHelpers
depoSwapCurve = PiecewiseYieldCurve(
'discount', 'loglinear', settlementDate, helpers, ts_daycounter
)
helpers = depositHelpers[:2] + futuresHelpers + swapHelpers[1:]
depoFuturesSwapCurve = PiecewiseYieldCurve(
'discount', 'loglinear',settlementDate, helpers, ts_daycounter
)
helpers = depositHelpers[:3] + fraHelpers + swapHelpers
depoFraSwapCurve = PiecewiseYieldCurve(
'discount', 'loglinear', settlementDate, helpers, ts_daycounter
)
# Term structures that will be used for pricing:
discountTermStructure = YieldTermStructure(relinkable=True)
forecastTermStructure = YieldTermStructure(relinkable=True)
### SWAPS TO BE PRICED
nominal = 1000000
length = 5
maturity = calendar.advance(settlementDate,length,Years)
payFixed = True
fixedLegFrequency = Annual
fixedLegAdjustment = Unadjusted
fixedLegDayCounter = Thirty360()
fixedRate = 0.04
floatingLegFrequency = Semiannual
spread = 0.0
fixingDays = 2
index = Euribor6M(forecastTermStructure)
floatingLegAdjustment = ModifiedFollowing
floatingLegDayCounter = index.dayCounter()
fixedSchedule = Schedule(settlementDate, maturity,
fixedLegTenor, calendar,
fixedLegAdjustment, fixedLegAdjustment,
Forward, False)
floatingSchedule = Schedule(settlementDate, maturity,
floatingLegTenor, calendar,
floatingLegAdjustment, floatingLegAdjustment,
Forward, False)
spot = VanillaSwap(VanillaSwap.Payer, nominal,
fixedSchedule, fixedRate, fixedLegDayCounter,
floatingSchedule, index, spread,
floatingLegDayCounter)
swapEngine = DiscountingSwapEngine(discountTermStructure)
spot.setPricingEngine(swapEngine)
forwardStart = calendar.advance(settlementDate,1,Years)
forwardEnd = calendar.advance(forwardStart,length,Years)
fixedSchedule = Schedule(forwardStart, forwardEnd,
fixedLegTenor, calendar,
fixedLegAdjustment, fixedLegAdjustment,
Forward, False)
floatingSchedule = Schedule(forwardStart, forwardEnd,
floatingLegTenor, calendar,
floatingLegAdjustment, floatingLegAdjustment,
Forward, False)
forward = VanillaSwap(VanillaSwap.Payer, nominal,
fixedSchedule, fixedRate, fixedLegDayCounter,
floatingSchedule, index, spread,
floatingLegDayCounter)
forward.setPricingEngine(swapEngine)
# price on the bootstrapped curves
def formatPrice(p,digits=2):
format = '%%.%df' % digits
return format % p
def formatRate(r,digits=2):
format = '%%.%df %%%%' % digits
return format % (r*100)
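# Illustrative values derived from the two helpers above (for exposition only):
# formatPrice(152.8243) -> '152.82', formatRate(0.037125, 4) -> '3.7125 %'.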
headers = ("term structure", "net present value",
"fair spread", "fair fixed rate" )
separator = " | "
format = ''
width = 0
for h in headers[:-1]:
format += '%%%ds' % len(h)
format += separator
width += len(h) + len(separator)
format += '%%%ds' % len(headers[-1])
width += len(headers[-1])
rule = "-" * width
dblrule = "=" * width
tab = " " * 8
def report(swap, name):
    print(format % (name, formatPrice(swap.NPV(), 2),
                    formatRate(swap.fairSpread(), 4),
                    formatRate(swap.fairRate(), 4)))
print(dblrule)
print("5-year market swap-rate = %s" % formatRate(swaps[(5,Years)].value()))
print(dblrule)
# price on two different term structures
print(tab + "5-years swap paying %s" % formatRate(fixedRate))
print(separator.join(headers))
print(rule)
discountTermStructure.linkTo(depoFuturesSwapCurve)
forecastTermStructure.linkTo(depoFuturesSwapCurve)
report(spot,'depo-fut-swap')
discountTermStructure.linkTo(depoFraSwapCurve)
forecastTermStructure.linkTo(depoFraSwapCurve)
report(spot,'depo-FRA-swap')
print(rule)
# price the 1-year forward swap
print(tab + "5-years, 1-year forward swap paying %s" % formatRate(fixedRate))
print(rule)
discountTermStructure.linkTo(depoFuturesSwapCurve)
forecastTermStructure.linkTo(depoFuturesSwapCurve)
report(forward,'depo-fut-swap')
discountTermStructure.linkTo(depoFraSwapCurve)
forecastTermStructure.linkTo(depoFraSwapCurve)
report(forward,'depo-FRA-swap')
# modify the 5-years swap rate and reprice
swaps[(5,Years)].setValue(0.046)
print(dblrule)
print("5-year market swap-rate = %s" % formatRate(swaps[(5,Years)].value()))
print(dblrule)
print(tab + "5-years swap paying %s" % formatRate(fixedRate))
print(separator.join(headers))
print(rule)
discountTermStructure.linkTo(depoFuturesSwapCurve)
forecastTermStructure.linkTo(depoFuturesSwapCurve)
report(spot,'depo-fut-swap')
discountTermStructure.linkTo(depoFraSwapCurve)
forecastTermStructure.linkTo(depoFraSwapCurve)
report(spot,'depo-FRA-swap')
print(rule)
print(tab + "5-years, 1-year forward swap paying %s" % formatRate(fixedRate))
print(rule)
discountTermStructure.linkTo(depoFuturesSwapCurve)
forecastTermStructure.linkTo(depoFuturesSwapCurve)
report(forward,'depo-fut-swap')
discountTermStructure.linkTo(depoFraSwapCurve)
forecastTermStructure.linkTo(depoFraSwapCurve)
report(forward,'depo-FRA-swap')
| 32.336918
| 80
| 0.66759
|
3f0c03ab7437ce427da0dcd17ab85784b2641d3c
| 3,302
|
py
|
Python
|
virtparade/utils/strings.py
|
major1201/virtparade
|
a8fd18ea7c676317b71b748367694108b0984217
|
[
"MIT"
] | 2
|
2019-01-29T20:48:12.000Z
|
2020-06-03T12:58:28.000Z
|
virtparade/utils/strings.py
|
major1201/virtparade
|
a8fd18ea7c676317b71b748367694108b0984217
|
[
"MIT"
] | null | null | null |
virtparade/utils/strings.py
|
major1201/virtparade
|
a8fd18ea7c676317b71b748367694108b0984217
|
[
"MIT"
] | null | null | null |
# encoding= utf-8
from __future__ import division, absolute_import, with_statement, print_function
import string
import random
REG_IP = "^(([2][5][0-5]|[2][0-4][0-9]|[1][0-9]{2}|[1-9][0-9]|[0-9])[.]){3}([2][5][0-5]|[2][0-4][0-9]|[1][0-9]{2}|[1-9][0-9]|[0-9])$"
REG_DOMAIN = r"^[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+\.?$"
def is_none(s):
return s is None
def is_not_none(s):
return not is_none(s)
def is_empty(s):
return is_none(s) or len(s) == 0
def is_not_empty(s):
return not is_empty(s)
def is_blank(s):
if is_empty(s):
return True
try:
return is_empty(s.strip(string.whitespace))
except:
return False
def is_not_blank(s):
return not is_blank(s)
def strip_to_none(s):
return None if is_blank(s) else s.strip()
def strip_to_empty(s):
return "" if is_blank(s) else s.strip()
def ltrim(s, replacement=" "):
return s[len(replacement):] if s.startswith(replacement) else s
def rtrim(s, replacement=" "):
return s[:-len(replacement)] if s.endswith(replacement) else s
def trim(s, replacement=" "):
return rtrim(ltrim(s, replacement), replacement)
def equals_ignore_case(s1, s2):
return False if s1 is None or s2 is None else s1.lower() == s2.lower()
def to_json(o, escape=False):
import json
from datetime import date
from datetime import datetime
class CJsonEncoder(json.JSONEncoder):
def __init__(self, **kwargs):
super(CJsonEncoder, self).__init__(**kwargs)
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(obj, date):
return obj.strftime("%Y-%m-%d")
else:
return super(CJsonEncoder, self).default(obj)
json_str = json.dumps(o, cls=CJsonEncoder, ensure_ascii=False)
return json_str.replace('\\', '\\\\').replace("'", "\\'") if escape else json_str
def uuid():
import uuid
return str(uuid.uuid4()).replace("-", "")
def get_between(ori, start, end):
ori = str(ori)
start = str(start)
end = str(end)
s = ori.find(start)
if s >= 0:
e = ori.find(end, s + len(start))
if e >= 0:
return ori[s + len(start):e]
return ""
def get_all_between(ori, start, end):
ret = []
ori = str(ori)
start = str(start)
end = str(end)
find_start = 0
ls = len(start)
le = len(end)
while True:
s = ori.find(start, find_start)
if s >= 0:
e = ori.find(end, s + ls)
if e >= 0:
ret.append(ori[s + ls:e])
find_start = e + le
continue
break
return ret
def get_non_empty_str(_dict, key, dv=None):
assert isinstance(_dict, dict)
val = _dict.get(key)
return dv if is_empty(val) else val
def random_str(length, ascii_letters=True, digits=True, punctuation=False):
random.seed()
chars_arr = []
if ascii_letters:
chars_arr.append(string.ascii_letters * 2)
if digits:
chars_arr.append(string.digits * 3)
if punctuation:
chars_arr.append(string.punctuation)
chars = ''.join(chars_arr)
return ''.join([random.choice(chars) for _ in range(length)])
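# Quick self-check of the slicing helpers above (added for exposition only;
# the literal inputs below are made up for illustration).
if __name__ == "__main__":
    assert get_between("key=<value>;", "<", ">") == "value"
    assert get_all_between("a <1> b <2> c", "<", ">") == ["1", "2"]
    # random_str weights letters x2 and digits x3 in the candidate pool, so
    # punctuation (when enabled) is sampled comparatively rarely.
    assert len(random_str(16)) == 16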
| 23.927536
| 133
| 0.589945
|
be427ab3ebef27e214830bf1c7fcc509daa57c08
| 45,819
|
py
|
Python
|
NotionDump/Parser/base_parser.py
|
delta1037/notion-dump-kernel
|
162fe2b72f9d4e571407a4dbe1f3c90b3c40fd5d
|
[
"MIT"
] | 8
|
2022-01-12T09:21:33.000Z
|
2022-02-22T18:40:50.000Z
|
NotionDump/Parser/base_parser.py
|
delta1037/notion-dump-kernel
|
162fe2b72f9d4e571407a4dbe1f3c90b3c40fd5d
|
[
"MIT"
] | 16
|
2022-01-11T12:01:33.000Z
|
2022-03-02T12:09:31.000Z
|
NotionDump/Parser/base_parser.py
|
delta1037/notion-dump-kernel
|
162fe2b72f9d4e571407a4dbe1f3c90b3c40fd5d
|
[
"MIT"
] | null | null | null |
# author: delta1037
# Date: 2022/01/08
# mail:geniusrabbit@qq.com
import copy
import NotionDump
from NotionDump.utils import content_format, common_op
from NotionDump.utils import internal_var
from urllib.parse import unquote
from NotionDump.utils.content_format import color_transformer, color_transformer_db, format_date_or_time
class BaseParser:
def __init__(self, base_id, export_child=False):
self.base_id = base_id.replace('-', '')
self.export_child = export_child
        # dictionary used to store discovered child pages
self.child_pages = {}
def set_new_id(self, parent_id):
self.base_id = parent_id
    # Return the child-page dictionary; it is handed out only once (the stored copy is cleared afterwards)
def get_child_pages_dic(self):
child_pages = copy.deepcopy(self.child_pages)
self.child_pages.clear() # 清空已有的内容
return child_pages
    # Apply rich-text formatting to a plain string
@staticmethod
def __annotations_parser(block_handle, str_plain):
if str_plain is None or str_plain == "":
return ""
last_char = str_plain[-1:]
if last_char == "\n" or last_char == "\t":
str_ret = str_plain[0:-1]
else:
str_ret = str_plain
if block_handle["code"]:
str_ret = "`" + str_ret + "`"
if block_handle["underline"]:
str_ret = "<u>" + str_ret + "</u>"
if block_handle["bold"]:
str_ret = "**" + str_ret + "**"
if block_handle["italic"]:
str_ret = "*" + str_ret + "*"
if block_handle["color"] != "default":
            # apply colour, distinguishing background from foreground colour
if block_handle["color"].find("_background") != -1:
bg_color = block_handle["color"][0:block_handle["color"].rfind('_')]
str_ret = "<span style=\"background-color:" + color_transformer(bg_color, background=True) + "\">" + str_ret + "</span>"
else:
str_ret = "<font color=\"" + color_transformer(block_handle["color"], background=False) + "\">" + str_ret + "</font>"
if block_handle["strikethrough"]:
str_ret = "~~" + str_ret + "~~"
if last_char == "\n" or last_char == "\t":
str_ret += last_char
return str_ret
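    # Worked example (for exposition only): given an annotations dict such as
    #     {"bold": True, "italic": True, "code": False, "underline": False,
    #      "strikethrough": False, "color": "default"}
    # the method above turns "abc" into "***abc***" -- code ticks are applied
    # first, then underline, bold, italic, colour spans and strikethrough.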
def __text_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN, is_db_title=False):
if block_handle["type"] != "text":
common_op.debug_log(
"text type error! id=" + self.base_id + " not type " + block_handle["type"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
text_str = ""
if "plain_text" in block_handle:
text_str = block_handle["plain_text"]
if text_str is None:
text_str = ""
        # handle a link attached to the text, if any
text_url = block_handle["href"]
        if text_url is not None and parser_type == NotionDump.PARSER_TYPE_MD and not is_db_title:  # database titles skip link parsing
            # the text carries a link: either a web link or an internal Notion link
if text_url.startswith("http"):
                # web link: format it directly
text_str = content_format.get_url_format(text_url, text_str)
else:
                # a page or database link; defer it for later relocation
if text_url.find("=") != -1:
page_id = text_url[text_url.rfind("/") + 1:text_url.rfind("?")]
common_op.debug_log("### page id " + page_id + " is database")
common_op.add_new_child_page(
self.child_pages,
key_id=page_id + "_" + text_str,
link_id=page_id,
link_src=text_url,
page_type="database",
page_name=text_str
)
else:
page_id = text_url[text_url.rfind("/") + 1:]
common_op.debug_log("### page id " + page_id + " is page")
common_op.add_new_child_page(
self.child_pages,
key_id=page_id + "_" + text_str,
link_id=page_id,
link_src=text_url,
page_type="page",
page_name=text_str
)
                # store the page for further recursive processing
                # save the child-page information
common_op.debug_log("child_page_parser add page id = " + page_id + "_" + text_str, level=NotionDump.DUMP_MODE_DEFAULT)
text_str = content_format.get_page_format_md(page_id + "_" + text_str, text_str,
export_child=self.export_child)
if parser_type == NotionDump.PARSER_TYPE_MD:
            # parse the annotations and apply formatting to text_str
return self.__annotations_parser(block_handle["annotations"], text_str)
else:
return text_str
def __text_block_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN, is_db_title=False):
paragraph_ret = ""
if block_handle["type"] == "text":
paragraph_ret = self.__text_parser(block_handle, parser_type)
elif block_handle["type"] == "equation":
paragraph_ret = self.__equation_inline_parser(block_handle)
elif block_handle["type"] == "mention":
paragraph_ret = self.__mention_parser(block_handle, parser_type, is_db_title=is_db_title)
else:
common_op.debug_log(
"text type " + block_handle["type"] + " error! parent_id= " + self.base_id,
level=NotionDump.DUMP_MODE_DEFAULT)
return paragraph_ret
def __text_list_parser(self, text_list, parser_type=NotionDump.PARSER_TYPE_PLAIN, is_db=False, is_db_title=False):
plain_text = ""
if text_list is not None:
for text_block in text_list:
plain_text += self.__text_block_parser(text_block, parser_type, is_db_title=is_db_title)
if is_db:
            # escape characters that are special inside database tables
return plain_text.replace("|", "\\|")
else:
return plain_text
    # TODO: only the name and id are extracted for people; deeper parsing of user data could be added later
def __people_parser(self, block_handle):
if block_handle["object"] != "user":
common_op.debug_log("people type error! id=" + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
        # prefer the user name
if "name" in block_handle.keys():
return block_handle["name"]
        # fall back to the id when no name is available
return block_handle["id"].replace('-', '')
def __user_parser(self, block_handle):
if block_handle["type"] != "user":
common_op.debug_log("user type error! id=" + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
user_body = block_handle["user"]
return self.__people_parser(user_body)
def __db_file_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "file":
common_op.debug_log("file type error! id=" + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
filename = block_handle["name"]
file_url = block_handle["file"]["url"]
        # extract the file id from the url
url_prefix = file_url[0:file_url.rfind("/")]
file_id = url_prefix[url_prefix.rfind("/") + 1:].replace('-', '')
common_op.debug_log("file id is : " + file_id)
if filename == "":
            # use the id as the default name when the file has none
filename = file_id
common_op.add_new_child_page(
self.child_pages,
key_id=file_id,
link_src=file_url,
page_type="file",
page_name=filename
)
common_op.debug_log(
"file_parser add page id = " + file_id + " name : " + filename, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
        # simple formatting (could also be rendered as a markdown []() link)
if parser_type == NotionDump.PARSER_TYPE_MD:
            # render the file as a file link
return content_format.get_file_format_md(filename, file_url, file_id, self.export_child)
else:
return content_format.get_file_format_plain(filename, file_url)
# "$ equation_inline $"
def __equation_inline_parser(self, block_handle):
if block_handle["type"] != "equation":
common_op.debug_log("equation inline type error! id=" + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
        # equations drop their rich-text formatting
# return content_format.get_equation_inline(
# self.__annotations_parser(block_handle["annotations"], block_handle["plain_text"])
# )
return content_format.get_equation_inline(block_handle["plain_text"])
# "$$ equation_block $$"
def __equation_block_parser(self, block_handle):
if block_handle["expression"] is None:
common_op.debug_log("equation block no expression! id=" + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
return content_format.get_equation_block(block_handle["expression"])
    # Attention: links to other pages may require recursive handling
def __page_parser(self, block_handle):
if block_handle["type"] != "page":
common_op.debug_log("page type error! parent_id= " + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
page_body = block_handle["page"]
return page_body["id"].replace('-', '')
    # Mentions of other pages, dates or users
def __mention_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN, is_db_title=False):
if block_handle["type"] != "mention":
common_op.debug_log("mention type error! parent_id= " + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
mention_body = block_handle["mention"]
mention_plain = ""
if mention_body["type"] == "date":
mention_plain = self.date_parser(mention_body)
elif mention_body["type"] == "user":
mention_plain = self.__user_parser(mention_body)
elif mention_body["type"] == "link_preview" and "url" in mention_body["link_preview"].keys():
mention_plain = mention_body["link_preview"]["url"]
elif mention_body["type"] == "database":
database_id = mention_body["database"]["id"].replace('-', '')
key_id = database_id + "_mention"
common_op.debug_log("__mention_parser add database id = " + database_id)
            # get the page name
database_name = block_handle["plain_text"]
database_link = block_handle["href"]
if is_db_title:
mention_plain = database_name
else:
common_op.add_new_child_page(
self.child_pages,
key_id=key_id,
link_id=database_id,
link_src=database_link,
page_type="database",
page_name=database_name
)
common_op.debug_log(
"file_parser add page id = " + key_id + " name : " + database_name, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
if parser_type == NotionDump.PARSER_TYPE_MD:
mention_plain = content_format.get_page_format_md(key_id, database_name, export_child=self.export_child)
else:
mention_plain = database_name
elif mention_body["type"] == "page":
page_id = self.__page_parser(mention_body)
key_id = page_id + "_mention"
common_op.debug_log("__mention_parser add page id = " + page_id)
            # get the page name
page_name = block_handle["plain_text"]
page_link = block_handle["href"]
if is_db_title:
mention_plain = page_name
else:
                # mentioned pages are handled like linked pages
common_op.add_new_child_page(
self.child_pages,
key_id=key_id,
link_id=page_id,
link_src=page_link,
page_type="page",
page_name=page_name
)
common_op.debug_log(
"file_parser add page id = " + key_id + " name : " + page_name, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
if parser_type == NotionDump.PARSER_TYPE_MD:
mention_plain = content_format.get_page_format_md(key_id, page_name, export_child=self.export_child)
else:
mention_plain = page_name
else:
common_op.debug_log("unknown mention type " + mention_body["type"], level=NotionDump.DUMP_MODE_DEFAULT)
if parser_type == NotionDump.PARSER_TYPE_MD:
            # parse the annotations and apply formatting to mention_plain
return self.__annotations_parser(block_handle["annotations"],
content_format.get_mention_format(mention_plain))
else:
return content_format.get_mention_format(mention_plain)
def __table_row_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "table_row":
common_op.debug_log("table_row type error! parent_id= " + self.base_id, level=NotionDump.DUMP_MODE_DEFAULT)
return ""
table_col_cells = block_handle["table_row"]["cells"]
table_row = []
for cell in table_col_cells:
table_row.append(self.__text_list_parser(cell, parser_type))
return table_row
    # database property: title
def title_parser(self, block_handle, page_id, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "title":
common_op.debug_log("title type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
db_page_title = self.__text_list_parser(block_handle["title"], parser_type, is_db=True, is_db_title=True)
if page_id == "":
            # an empty page id means only the content is wanted, so no relocation is needed
return db_page_title
if db_page_title != "":
            # a child page exists, so add it to the parsing queue
common_op.debug_log("title ret = " + db_page_title)
if parser_type != NotionDump.PARSER_TYPE_PLAIN:
common_op.debug_log("title_parser add page id = " + page_id, level=NotionDump.DUMP_MODE_DEFAULT)
else:
common_op.debug_log("title_parser add page id = " + page_id)
            # every entry inside a database is a child page
common_op.add_new_child_page(self.child_pages, key_id=page_id, page_name=db_page_title)
            # insert a placeholder so the link can be relocated later
db_page_title = content_format.get_database_title_format(page_id, db_page_title, self.export_child)
return db_page_title
    # database property: rich_text
def rich_text_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "rich_text":
common_op.debug_log("rich_text type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
return self.__text_list_parser(block_handle["rich_text"], parser_type, is_db=True)
    # database property: multi_select
def multi_select_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "multi_select":
common_op.debug_log("multi_select type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
multi_select_list = block_handle["multi_select"]
ret_str = ""
if multi_select_list is None:
return ret_str
for multi_select in multi_select_list:
if ret_str != "":
ret_str += "," # 多个选项之间用“,”分割
if parser_type == NotionDump.PARSER_TYPE_MD:
ret_str += "<span style=\"background-color:" \
+ color_transformer_db(multi_select["color"]) \
+ "\"> " + multi_select["name"] + " </span>"
else:
ret_str += multi_select["name"]
return ret_str
    # database property: select
def select_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "select":
common_op.debug_log("select type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
select = block_handle["select"]
ret_str = ""
if select is None:
return ret_str
if parser_type == NotionDump.PARSER_TYPE_MD:
ret_str = "<span style=\"background-color:" \
+ color_transformer_db(select["color"]) \
+ "\"> " + select["name"] + " </span>"
else:
ret_str = select["name"]
return ret_str
    # database property: url
def url_parser(self, block_handle):
if block_handle["type"] != "url":
common_op.debug_log("url type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
url = block_handle["url"]
if url is None:
url = ""
return content_format.get_url_format(url)
    # database property: email
def email_parser(self, block_handle):
if block_handle["type"] != "email":
common_op.debug_log("email type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
email = block_handle["email"]
ret_str = ""
if email is not None:
ret_str = email
return ret_str
    # database property: checkbox
def checkbox_parser(self, block_handle):
if block_handle["type"] != "checkbox":
common_op.debug_log("checkbox type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
checkbox = block_handle["checkbox"]
if checkbox is True:
ret_str = NotionDump.MD_BOOL_TRUE
else:
ret_str = NotionDump.MD_BOOL_FALSE
return ret_str
    # database property: phone_number
def phone_number_parser(self, block_handle):
if block_handle["type"] != "phone_number":
common_op.debug_log("phone_number type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
phone_number = block_handle["phone_number"]
ret_str = ""
if phone_number is not None:
ret_str = phone_number
return ret_str
    # database property: date
def date_parser(self, block_handle):
if block_handle["type"] != "date":
common_op.debug_log("date type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
date = block_handle["date"]
if date is None:
return ""
return content_format.get_date_format(date["start"], date["end"])
    # database property: people
def people_parser(self, block_handle):
if block_handle["type"] != "people":
common_op.debug_log("people type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
people_list = block_handle["people"]
ret_str = ""
if people_list is None:
return ret_str
for people in people_list:
if ret_str != "":
ret_str += "," # 多个用户之间用“,”分割
ret_str += self.__people_parser(people)
return ret_str
    # database property: number
def number_parser(self, block_handle):
if block_handle["type"] != "number":
common_op.debug_log("number type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
number = block_handle["number"]
ret_str = ""
if number is None:
return ret_str
ret_str = number
return str(ret_str)
    # database property: files
def files_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "files":
common_op.debug_log("files type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
files_list = block_handle["files"]
ret_str = ""
if files_list is None:
return ret_str
for file in files_list:
if ret_str != "":
if parser_type == NotionDump.PARSER_TYPE_MD:
ret_str += "<br>" # 多个文件之间用“<br>”分割
else:
ret_str += "," # 多个文件之间用“,”分割
ret_str += self.__db_file_parser(file, parser_type)
return ret_str
    # database property: relation
def relation_parser(self, block_handle):
if block_handle["type"] != "relation":
common_op.debug_log("relation type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
relation_list = block_handle["relation"]
relation_ret = ""
for relation_item in relation_list:
relation_id = relation_item["id"].replace("-", "")
            # treat the relation as a soft link
common_op.add_new_child_page(
self.child_pages,
key_id=relation_id + "_relation",
link_id=relation_id,
page_type="page",
page_name=""
)
if relation_ret != "":
relation_ret += ","
relation_ret += content_format.get_database_title_format(relation_id + "_relation", "", self.export_child)
return relation_ret
    # database property: formula
def formula_parser(self, block_handle):
if block_handle["type"] != "formula":
common_op.debug_log("formula type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
formula_block = block_handle["formula"]
ret_str = ""
if formula_block["type"] == "string":
ret_str = formula_block["string"]
elif formula_block["type"] == "number":
ret_str = str(formula_block["number"])
elif formula_block["type"] == "boolean":
if formula_block["boolean"] is True:
ret_str = NotionDump.MD_BOOL_TRUE
else:
ret_str = NotionDump.MD_BOOL_FALSE
# ret_str = str(formula_block["boolean"])
elif formula_block["type"] == "date":
ret_str = self.date_parser(formula_block)
else:
ret_str = "[unknown_formula_type:" + formula_block["type"] + "]"
return ret_str
    # database property: created_time
def created_time_parser(self, block_handle):
if block_handle["type"] != "created_time":
common_op.debug_log("created_time type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
return format_date_or_time(block_handle["created_time"])
    # database property: last_edited_time
def last_edited_time_parser(self, block_handle):
if block_handle["type"] != "last_edited_time":
common_op.debug_log(
"last_edited_time type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
return format_date_or_time(block_handle["last_edited_time"])
def created_by_parser(self, block_handle):
if block_handle["type"] != "created_by":
common_op.debug_log("created_by type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
return self.__people_parser(block_handle["created_by"])
    # database property: last_edited_by
def last_edited_by_parser(self, block_handle):
if block_handle["type"] != "last_edited_by":
common_op.debug_log(
"last_edited_by type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
return self.__people_parser(block_handle["last_edited_by"])
# Page paragraph
# mention
# date
# user
# page
# text
# equation
def paragraph_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
paragraph_ret = ""
if block_handle["type"] != "paragraph":
common_op.debug_log("paragraph type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return paragraph_ret
return self.__text_list_parser(block_handle["paragraph"]["rich_text"], parser_type)
# Page heading_1
def heading_1_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
heading_1_ret = ""
if block_handle["type"] != "heading_1":
common_op.debug_log("heading_1 type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return heading_1_ret
heading_1_ret = self.__text_list_parser(block_handle["heading_1"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
return "# " + heading_1_ret
else:
return heading_1_ret
# Page heading_2
def heading_2_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
heading_2_ret = ""
if block_handle["type"] != "heading_2":
common_op.debug_log("heading_2 type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return heading_2_ret
heading_2_ret = self.__text_list_parser(block_handle["heading_2"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
return "## " + heading_2_ret
else:
return heading_2_ret
# Page heading_3
def heading_3_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
heading_3_ret = ""
if block_handle["type"] != "heading_3":
common_op.debug_log("heading_3 type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return heading_3_ret
heading_3_ret = self.__text_list_parser(block_handle["heading_3"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
return "### " + heading_3_ret
else:
return heading_3_ret
# Page to_do
def to_do_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
to_do_ret = ""
if block_handle["type"] != "to_do":
common_op.debug_log("to_do type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return to_do_ret
to_do_ret = self.__text_list_parser(block_handle["to_do"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
if block_handle["to_do"]["checked"]:
return "- [x] " + to_do_ret
else:
return "- [ ] " + to_do_ret
else:
return to_do_ret
# Page bulleted_list_item
def bulleted_list_item_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
bulleted_list_item_ret = ""
if block_handle["type"] != "bulleted_list_item":
common_op.debug_log(
"bulleted_list_item type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return bulleted_list_item_ret
bulleted_list_item_ret = self.__text_list_parser(block_handle["bulleted_list_item"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
return "- " + bulleted_list_item_ret
else:
return bulleted_list_item_ret
# Page numbered_list_item
def numbered_list_item_parser(self, block_handle, list_index, parser_type=NotionDump.PARSER_TYPE_PLAIN):
numbered_list_item_ret = ""
if block_handle["type"] != "numbered_list_item":
common_op.debug_log(
"numbered_list_item type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return numbered_list_item_ret
numbered_list_item_ret = self.__text_list_parser(block_handle["numbered_list_item"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
return str(list_index) + ". " + numbered_list_item_ret
else:
return numbered_list_item_ret
# Page toggle
def toggle_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
toggle_ret = ""
if block_handle["type"] != "toggle":
common_op.debug_log("toggle type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return toggle_ret
toggle_ret = self.__text_list_parser(block_handle["toggle"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
return "- " + toggle_ret
else:
return toggle_ret
# Page divider
def divider_parser(self, block_handle):
divider_ret = ""
if block_handle["type"] != "divider":
common_op.debug_log("divider type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return divider_ret
divider_ret = NotionDump.MD_DIVIDER
return divider_ret
# Page callout
def callout_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
callout_ret = ""
if block_handle["type"] != "callout":
common_op.debug_log("callout type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return callout_ret
callout_ret = self.__text_list_parser(block_handle["callout"]["rich_text"], parser_type)
if parser_type == NotionDump.PARSER_TYPE_MD:
            # TODO: should this be applied to every line?
return "> " + callout_ret
else:
return callout_ret
# Page code
def code_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
code_ret = ""
if block_handle["type"] != "code":
common_op.debug_log("code type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return code_ret
code_ret = self.__text_list_parser(block_handle["code"]["rich_text"], parser_type)
code_type = block_handle["code"]["language"]
if code_type is None:
code_type = ""
if parser_type == NotionDump.PARSER_TYPE_MD:
            # TODO: should this be applied to every line?
return "```" + code_type + "\n" + code_ret + "\n```"
else:
return code_ret
# Page quote
def quote_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
quote_ret = ""
if block_handle["type"] != "quote":
common_op.debug_log("quote type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return quote_ret
quote_ret = self.__text_list_parser(block_handle["quote"]["rich_text"], parser_type)
        # colour applied to the whole quote
if block_handle["quote"]["color"] != "default":
            # apply colour, distinguishing background from foreground colour
if block_handle["quote"]["color"].find("_background") != -1:
bg_color = block_handle["quote"]["color"][0:block_handle["quote"]["color"].rfind('_')]
quote_ret = "<span style=\"background-color:" + color_transformer(bg_color, background=True) + "\">" + quote_ret + "</span>"
else:
quote_ret = "<font color=\"" + color_transformer(block_handle["quote"]["color"], background=False) + "\">" + quote_ret + "</font>"
if parser_type == NotionDump.PARSER_TYPE_MD:
            # TODO: should this be applied to every line?
return "> " + quote_ret
else:
return quote_ret
# Page equation
def equation_parser(self, block_handle):
equation_ret = ""
if block_handle["type"] != "equation":
common_op.debug_log(" type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return equation_ret
return self.__equation_block_parser(block_handle["equation"])
# Page table_row
def table_row_parser(self, block_handle, first_row=False, parser_type=NotionDump.PARSER_TYPE_PLAIN):
table_row_ret = ""
if block_handle["type"] != "table_row":
common_op.debug_log("table_row type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return table_row_ret
table_row_list = self.__table_row_parser(block_handle, parser_type)
table_row_ret = "|"
for it in table_row_list:
table_row_ret += it + "|"
if first_row:
table_row_ret += "\n|"
for i in range(len(table_row_list)):
table_row_ret += " --- " + "|"
return table_row_ret
def child_page_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
child_page_ret = ""
if block_handle["type"] != "child_page":
common_op.debug_log("child_page type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return child_page_ret
page_body = block_handle["child_page"]
if page_body["title"] == "":
if parser_type == NotionDump.PARSER_TYPE_MD:
return content_format.get_page_format_md("NULL Page", "NULL Page", export_child=self.export_child)
else:
return content_format.get_page_format_plain("NULL Page")
else:
page_id = (block_handle["id"]).replace('-', '')
            # save the child-page information
common_op.debug_log("child_page_parser add page id = " + page_id, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.add_new_child_page(self.child_pages, key_id=page_id, page_name=page_body["title"])
if parser_type == NotionDump.PARSER_TYPE_MD:
return content_format.get_page_format_md(page_id, page_body["title"], export_child=self.export_child)
else:
return content_format.get_page_format_plain(page_body["title"])
# Page child_database
def child_database_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "child_database":
common_op.debug_log("child_database type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
        # the child database is stored in the page table and is not parsed here
child_db_id = block_handle["id"].replace('-', '')
common_op.add_new_child_page(
self.child_pages,
key_id=child_db_id,
page_type="database",
page_name=block_handle["child_database"]["title"]
)
common_op.debug_log(
"child_database_parser add page id = " + child_db_id + "name : " + block_handle["child_database"]["title"], level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
        # a child database returns a link placeholder that is resolved later
if parser_type == NotionDump.PARSER_TYPE_MD:
return content_format.get_page_format_md(
child_db_id,
block_handle["child_database"]["title"],
export_child=self.export_child
)
else:
return content_format.get_page_format_plain(block_handle["child_database"]["title"])
# Page image
def image_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "image":
common_op.debug_log("image type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
        # stored in the page table rather than parsed here
image_id = block_handle["id"].replace('-', '')
image_name = self.__text_list_parser(block_handle["image"]["caption"], parser_type)
image_url = ""
image_type = block_handle["image"]["type"]
if image_type in block_handle["image"].keys():
if "url" in block_handle["image"][image_type].keys():
image_url = block_handle["image"][image_type]["url"]
if image_url == "":
common_op.debug_log("unknown image type" + block_handle["image"]["type"],
level=NotionDump.DUMP_MODE_DEFAULT)
if image_name == "":
            # use the id as the default name when the image has none
image_name = image_id
common_op.add_new_child_page(
self.child_pages,
key_id=image_id,
link_src=image_url,
page_type="image",
page_name=image_name
)
common_op.debug_log(
"image_parser add page id = " + image_id + "name : " + image_name, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
        # images return a link placeholder that is resolved later
if parser_type == NotionDump.PARSER_TYPE_MD:
return content_format.get_page_format_md(
image_id,
image_name,
export_child=self.export_child
)
else:
return content_format.get_page_format_plain(image_name)
# Page file(file,pdf,video)
def file_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
if block_handle["type"] != "file" and block_handle["type"] != "pdf" and block_handle["type"] != "video":
common_op.debug_log("file type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
block_type = block_handle["type"]
file_id = block_handle["id"].replace('-', '')
file_name = self.__text_list_parser(block_handle[block_type]["caption"], parser_type)
file_url = ""
file_type = block_handle[block_type]["type"]
if file_type in block_handle[block_type].keys():
if "url" in block_handle[block_type][file_type].keys():
file_url = block_handle[block_type][file_type]["url"]
if file_url == "":
common_op.debug_log("unknown block type" + block_handle[block_type]["type"] + " with null url",
level=NotionDump.DUMP_MODE_DEFAULT)
return ""
        # if the caption holds no file name, try to extract one from the url
if file_name == "":
file_url_basic = file_url[0:file_url.rfind('?')]
file_name = file_url_basic[file_url_basic.rfind('/')+1:]
            # names extracted from the url need to be url-decoded
file_name = unquote(file_name, 'utf-8')
if file_name == "":
            # fall back to "FILE" as the default name
file_name = "FILE"
common_op.add_new_child_page(
self.child_pages,
key_id=file_id,
link_src=file_url,
page_type="file",
page_name=file_name
)
common_op.debug_log(
"file_parser add page id = " + file_id + " name : " + file_name, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
        # files return a link placeholder that is resolved later
if parser_type == NotionDump.PARSER_TYPE_MD:
return content_format.get_page_format_md(
file_id,
file_name,
export_child=self.export_child
)
else:
return content_format.get_page_format_plain(file_name)
# Page bookmark
def bookmark_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
bookmark_ret = ""
if block_handle["type"] != "bookmark":
common_op.debug_log("bookmark type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return bookmark_ret
bookmark_name = self.__text_list_parser(block_handle["bookmark"]["caption"], parser_type)
if bookmark_name == "":
bookmark_name = "BOOKMARK"
bookmark_url = block_handle["bookmark"]["url"]
        # bookmarks return a link placeholder that is resolved later
if parser_type == NotionDump.PARSER_TYPE_MD:
            # render as a file link
return content_format.get_file_format_md(bookmark_name, bookmark_url)
else:
return content_format.get_file_format_plain(bookmark_name, bookmark_url)
# Page embed
def embed_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
embed_ret = ""
if block_handle["type"] != "embed":
common_op.debug_log("embed type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return embed_ret
embed_name = self.__text_list_parser(block_handle["embed"]["caption"], parser_type)
if embed_name == "":
embed_name = "EMBED"
embed_url = block_handle["embed"]["url"]
        # embeds return a link placeholder that is resolved later
if parser_type == NotionDump.PARSER_TYPE_MD:
            # render as a file link
return content_format.get_file_format_md(embed_name, embed_url)
else:
return content_format.get_file_format_plain(embed_name, embed_url)
# Page link_preview
def link_preview_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
link_preview_ret = ""
if block_handle["type"] != "link_preview":
common_op.debug_log("link_preview type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return link_preview_ret
link_preview_name = "LINK_PREVIEW"
link_preview_url = block_handle["link_preview"]["url"]
        # link previews return a link placeholder that is resolved later
if parser_type == NotionDump.PARSER_TYPE_MD:
            # render as a file link
return content_format.get_file_format_md(link_preview_name, link_preview_url)
else:
return content_format.get_file_format_plain(link_preview_name, link_preview_url)
# Page link_to_page
def link_to_page_parser(self, block_handle, parser_type=NotionDump.PARSER_TYPE_PLAIN):
link_to_page_ret = ""
if block_handle["type"] != "link_to_page":
common_op.debug_log("link_to_page type error! parent_id= " + self.base_id + " id= " + block_handle["id"],
level=NotionDump.DUMP_MODE_DEFAULT)
return link_to_page_ret
link_page = block_handle["link_to_page"]
if link_page["type"] == "page_id":
page_id = link_page["page_id"].replace('-', '')
page_name = ""
key_id = page_id + "_link_page"
common_op.add_new_child_page(
self.child_pages,
key_id=key_id,
link_id=page_id,
page_type="page",
page_name=page_name
)
common_op.debug_log(
"link_to_page_parser add link_page key_id = " + key_id, level=NotionDump.DUMP_MODE_DEFAULT)
common_op.debug_log(internal_var.PAGE_DIC)
common_op.debug_log("#############")
common_op.debug_log(self.child_pages)
return content_format.get_page_format_md(
key_id,
page_name,
export_child=self.export_child
)
else:
common_op.debug_log("unknown type " + link_page["type"], level=NotionDump.DUMP_MODE_DEFAULT)
return link_to_page_ret
| 44.657895
| 155
| 0.592789
|
17ab7f450cd32852f0e466a7ba7d00b3e1a0fa3a
| 236
|
py
|
Python
|
src/radish/parser/__init__.py
|
radish-bdd/radish2
|
4ea71291aac8700ec2b284b7f2bbf79bf2970324
|
[
"MIT"
] | 182
|
2015-06-27T16:17:17.000Z
|
2022-03-19T11:49:10.000Z
|
src/radish/parser/__init__.py
|
radish-bdd/radish2
|
4ea71291aac8700ec2b284b7f2bbf79bf2970324
|
[
"MIT"
] | 357
|
2015-11-06T19:30:19.000Z
|
2022-03-28T19:49:04.000Z
|
src/radish/parser/__init__.py
|
radish-bdd/radish2
|
4ea71291aac8700ec2b284b7f2bbf79bf2970324
|
[
"MIT"
] | 52
|
2015-08-22T08:36:37.000Z
|
2022-03-01T06:04:06.000Z
|
"""
radish
~~~~~~
the root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
from radish.parser.core import FeatureFileParser # noqa
| 19.666667
| 56
| 0.711864
|
07bf23f579e8188558c5149156d25d1ca5f47132
| 298
|
py
|
Python
|
demo_app/demo_app/urls.py
|
Visgean/django-session-activity
|
470cd6468c69249ff066bf65cfda6795e05c34f7
|
[
"MIT"
] | 2
|
2017-12-28T02:51:44.000Z
|
2018-11-19T16:02:13.000Z
|
demo_app/demo_app/urls.py
|
Visgean/django-session-activity
|
470cd6468c69249ff066bf65cfda6795e05c34f7
|
[
"MIT"
] | 1
|
2017-06-14T15:25:26.000Z
|
2017-06-14T15:25:26.000Z
|
demo_app/demo_app/urls.py
|
Visgean/django-session-activity
|
470cd6468c69249ff066bf65cfda6795e05c34f7
|
[
"MIT"
] | 3
|
2017-12-28T02:51:52.000Z
|
2019-12-12T14:25:55.000Z
|
from django.conf.urls import url, include
from django.contrib import admin
from session_log import urls as session_urls
from simple_login import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r"^login/$", views.SimpleLogin.as_view()),
url(r"^", include(session_urls)),
]
| 24.833333
| 50
| 0.724832
|
9b996ebd6f9bce516e26a3b6014a8c92f8167218
| 2,271
|
py
|
Python
|
sgains/commands/tests/test_mappable_regions_command.py
|
seqpipe/sgains
|
70a0e2b4087f7d4ac8db98f3e478f1ad36c2581b
|
[
"MIT"
] | null | null | null |
sgains/commands/tests/test_mappable_regions_command.py
|
seqpipe/sgains
|
70a0e2b4087f7d4ac8db98f3e478f1ad36c2581b
|
[
"MIT"
] | null | null | null |
sgains/commands/tests/test_mappable_regions_command.py
|
seqpipe/sgains
|
70a0e2b4087f7d4ac8db98f3e478f1ad36c2581b
|
[
"MIT"
] | null | null | null |
'''
Created on Aug 2, 2017
@author: lubo
'''
def test_mappable_regions_long(
argparser, tests_config, mappable_regions_command, mocker):
mappable_regions_command.add_options(tests_config)
argv = [
"--dry-run", "--force",
"--config", "tests/data/scpipe_tests.yml",
"mappable-regions",
"--mappable-dir", "data/proba",
"--genome-index", "genomeindex",
"--genome-dir", "data/hg19/",
"--read-length", "200",
"--bowtie-opts", "-1 -2 -3",
]
with mocker.patch("os.path.exists"), \
mocker.patch("sgains.config.Config.mapping_reads_filenames"), \
mocker.patch("os.listdir"):
args = argparser.parse_args(argv)
args.func(args)
config = mappable_regions_command.config
assert config is not None
assert config.force
assert config.dry_run
assert config.genome.work_dir == "data/hg19/"
assert config.genome.index == "genomeindex"
assert config.mappable_regions.length == 200
assert config.mappable_regions.work_dir == "data/proba"
assert config.mappable_regions.bowtie_opts == "-1 -2 -3"
def test_mappable_regions_short(
argparser, tests_config, mappable_regions_command, mocker):
mappable_regions_command.add_options(tests_config)
argv = [
"-n", "-F",
"--config", "tests/data/scpipe_tests.yml",
"mappable-regions",
"-m", "data/proba",
"-G", "genomeindex",
"--genome-dir", "data/hg19/",
"-l", "200",
"--bowtie-opts", "-1 -2 -3",
]
with mocker.patch("os.path.exists"), \
mocker.patch("sgains.config.Config.mapping_reads_filenames"), \
mocker.patch("os.listdir"):
args = argparser.parse_args(argv)
args.func(args)
config = mappable_regions_command.config
assert config is not None
assert config.force
assert config.dry_run
assert config.genome.work_dir == "data/hg19/"
assert config.genome.index == "genomeindex"
assert config.mappable_regions.length == 200
assert config.mappable_regions.work_dir == "data/proba"
assert config.mappable_regions.bowtie_opts == "-1 -2 -3"
| 29.493506
| 75
| 0.607662
|
6f074ab310d7e569443a022b41b1b0eef8f66580
| 10,242
|
py
|
Python
|
api/ccxt-master/python/ccxt/async/__init__.py
|
EdgarSargsyan/post
|
da26b98f4e68df5510fa0603645b1c1c6633f058
|
[
"MIT"
] | null | null | null |
api/ccxt-master/python/ccxt/async/__init__.py
|
EdgarSargsyan/post
|
da26b98f4e68df5510fa0603645b1c1c6633f058
|
[
"MIT"
] | null | null | null |
api/ccxt-master/python/ccxt/async/__init__.py
|
EdgarSargsyan/post
|
da26b98f4e68df5510fa0603645b1c1c6633f058
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.10.502'
# -----------------------------------------------------------------------------
from ccxt.async.base.exchange import Exchange # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.async._1broker import _1broker # noqa: F401
from ccxt.async._1btcxe import _1btcxe # noqa: F401
from ccxt.async.acx import acx # noqa: F401
from ccxt.async.allcoin import allcoin # noqa: F401
from ccxt.async.anxpro import anxpro # noqa: F401
from ccxt.async.binance import binance # noqa: F401
from ccxt.async.bit2c import bit2c # noqa: F401
from ccxt.async.bitbay import bitbay # noqa: F401
from ccxt.async.bitcoincoid import bitcoincoid # noqa: F401
from ccxt.async.bitfinex import bitfinex # noqa: F401
from ccxt.async.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async.bitflyer import bitflyer # noqa: F401
from ccxt.async.bithumb import bithumb # noqa: F401
from ccxt.async.bitlish import bitlish # noqa: F401
from ccxt.async.bitmarket import bitmarket # noqa: F401
from ccxt.async.bitmex import bitmex # noqa: F401
from ccxt.async.bitso import bitso # noqa: F401
from ccxt.async.bitstamp import bitstamp # noqa: F401
from ccxt.async.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async.bittrex import bittrex # noqa: F401
from ccxt.async.bl3p import bl3p # noqa: F401
from ccxt.async.bleutrade import bleutrade # noqa: F401
from ccxt.async.btcbox import btcbox # noqa: F401
from ccxt.async.btcchina import btcchina # noqa: F401
from ccxt.async.btcexchange import btcexchange # noqa: F401
from ccxt.async.btcmarkets import btcmarkets # noqa: F401
from ccxt.async.btctradeua import btctradeua # noqa: F401
from ccxt.async.btcturk import btcturk # noqa: F401
from ccxt.async.btcx import btcx # noqa: F401
from ccxt.async.bter import bter # noqa: F401
from ccxt.async.bxinth import bxinth # noqa: F401
from ccxt.async.ccex import ccex # noqa: F401
from ccxt.async.cex import cex # noqa: F401
from ccxt.async.chbtc import chbtc # noqa: F401
from ccxt.async.chilebit import chilebit # noqa: F401
from ccxt.async.coincheck import coincheck # noqa: F401
from ccxt.async.coinfloor import coinfloor # noqa: F401
from ccxt.async.coingi import coingi # noqa: F401
from ccxt.async.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async.coinmate import coinmate # noqa: F401
from ccxt.async.coinsecure import coinsecure # noqa: F401
from ccxt.async.coinspot import coinspot # noqa: F401
from ccxt.async.cryptopia import cryptopia # noqa: F401
from ccxt.async.dsx import dsx # noqa: F401
from ccxt.async.exmo import exmo # noqa: F401
from ccxt.async.flowbtc import flowbtc # noqa: F401
from ccxt.async.foxbit import foxbit # noqa: F401
from ccxt.async.fybse import fybse # noqa: F401
from ccxt.async.fybsg import fybsg # noqa: F401
from ccxt.async.gatecoin import gatecoin # noqa: F401
from ccxt.async.gateio import gateio # noqa: F401
from ccxt.async.gdax import gdax # noqa: F401
from ccxt.async.gemini import gemini # noqa: F401
from ccxt.async.getbtc import getbtc # noqa: F401
from ccxt.async.hitbtc import hitbtc # noqa: F401
from ccxt.async.hitbtc2 import hitbtc2 # noqa: F401
from ccxt.async.huobi import huobi # noqa: F401
from ccxt.async.huobicny import huobicny # noqa: F401
from ccxt.async.huobipro import huobipro # noqa: F401
from ccxt.async.independentreserve import independentreserve # noqa: F401
from ccxt.async.itbit import itbit # noqa: F401
from ccxt.async.jubi import jubi # noqa: F401
from ccxt.async.kraken import kraken # noqa: F401
from ccxt.async.kucoin import kucoin # noqa: F401
from ccxt.async.kuna import kuna # noqa: F401
from ccxt.async.lakebtc import lakebtc # noqa: F401
from ccxt.async.liqui import liqui # noqa: F401
from ccxt.async.livecoin import livecoin # noqa: F401
from ccxt.async.luno import luno # noqa: F401
from ccxt.async.mercado import mercado # noqa: F401
from ccxt.async.mixcoins import mixcoins # noqa: F401
from ccxt.async.nova import nova # noqa: F401
from ccxt.async.okcoincny import okcoincny # noqa: F401
from ccxt.async.okcoinusd import okcoinusd # noqa: F401
from ccxt.async.okex import okex # noqa: F401
from ccxt.async.paymium import paymium # noqa: F401
from ccxt.async.poloniex import poloniex # noqa: F401
from ccxt.async.qryptos import qryptos # noqa: F401
from ccxt.async.quadrigacx import quadrigacx # noqa: F401
from ccxt.async.quoine import quoine # noqa: F401
from ccxt.async.southxchange import southxchange # noqa: F401
from ccxt.async.surbitcoin import surbitcoin # noqa: F401
from ccxt.async.therock import therock # noqa: F401
from ccxt.async.tidex import tidex # noqa: F401
from ccxt.async.urdubit import urdubit # noqa: F401
from ccxt.async.vaultoro import vaultoro # noqa: F401
from ccxt.async.vbtc import vbtc # noqa: F401
from ccxt.async.virwox import virwox # noqa: F401
from ccxt.async.wex import wex # noqa: F401
from ccxt.async.xbtce import xbtce # noqa: F401
from ccxt.async.yobit import yobit # noqa: F401
from ccxt.async.yunbi import yunbi # noqa: F401
from ccxt.async.zaif import zaif # noqa: F401
from ccxt.async.zb import zb # noqa: F401
exchanges = [
'_1broker',
'_1btcxe',
'acx',
'allcoin',
'anxpro',
'binance',
'bit2c',
'bitbay',
'bitcoincoid',
'bitfinex',
'bitfinex2',
'bitflyer',
'bithumb',
'bitlish',
'bitmarket',
'bitmex',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bl3p',
'bleutrade',
'btcbox',
'btcchina',
'btcexchange',
'btcmarkets',
'btctradeua',
'btcturk',
'btcx',
'bter',
'bxinth',
'ccex',
'cex',
'chbtc',
'chilebit',
'coincheck',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinsecure',
'coinspot',
'cryptopia',
'dsx',
'exmo',
'flowbtc',
'foxbit',
'fybse',
'fybsg',
'gatecoin',
'gateio',
'gdax',
'gemini',
'getbtc',
'hitbtc',
'hitbtc2',
'huobi',
'huobicny',
'huobipro',
'independentreserve',
'itbit',
'jubi',
'kraken',
'kucoin',
'kuna',
'lakebtc',
'liqui',
'livecoin',
'luno',
'mercado',
'mixcoins',
'nova',
'okcoincny',
'okcoinusd',
'okex',
'paymium',
'poloniex',
'qryptos',
'quadrigacx',
'quoine',
'southxchange',
'surbitcoin',
'therock',
'tidex',
'urdubit',
'vaultoro',
'vbtc',
'virwox',
'wex',
'xbtce',
'yobit',
'yunbi',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
]
__all__ = base + errors.__all__ + exchanges
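# Usage sketch (illustrative, not executed here): instantiate one of the async exchange
# classes exported above and fetch a ticker. The symbol is an example; credentials are
# only required for private endpoints, and close() releases the underlying HTTP session.
#
#     import asyncio
#     import ccxt.async as ccxt  # this module; note 'async' is only importable on Python < 3.7
#
#     async def show_last_price():
#         exchange = ccxt.binance()
#         try:
#             ticker = await exchange.fetch_ticker('BTC/USDT')
#             print(ticker['last'])
#         finally:
#             await exchange.close()
#
#     asyncio.get_event_loop().run_until_complete(show_last_price())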
| 45.118943
| 79
| 0.517965
|
d766a7099e7c62e83262ad0bac818d3752161d0a
| 20,760
|
py
|
Python
|
source/preprocessing.py
|
TMarquet/speech_recognition
|
06cf66b89e706306d676e127126d525bd18588f0
|
[
"CC-BY-4.0"
] | null | null | null |
source/preprocessing.py
|
TMarquet/speech_recognition
|
06cf66b89e706306d676e127126d525bd18588f0
|
[
"CC-BY-4.0"
] | null | null | null |
source/preprocessing.py
|
TMarquet/speech_recognition
|
06cf66b89e706306d676e127126d525bd18588f0
|
[
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 16:19:20 2020
@author: kahg8
"""
import os
# Helper libraries
import numpy as np
import time
from scipy.io import wavfile
import sys
import random
from python_speech_features import mfcc,ssc
from tensorflow.keras.models import load_model
labels = ["yes", "no", "up", "down", "left","right", "on", "off", "stop", "go", "zero"
, "one", "two", "three", "four","five", "six", "seven", "eight", "nine",'silence','unknown']
unknown_labels = ["bed", "bird", "cat", "dog", "happy", "house", "marvin", "sheila","tree","wow"]
BASE_PATH = 'C:/Users/martho/Documents/speech_recognition/' if sys.platform == 'win32' else '/root/Projets/speech_recognition/'
PATH_LIST = BASE_PATH + 'lists/'
PATH_DATA = BASE_PATH + 'data/'
PATH_MODELS = BASE_PATH + 'models/'
# training_size = 'all'
# use_raw = False
# use_mfcc = True
# use_ssc = False
# use_cut = True
# add_silence = True
# data_augmentation = False
coef_silence = 1
coef_noise = 1
def normalize_audio(audio):
n_audio = np.subtract(audio,np.mean(audio))
n_audio = np.divide(n_audio,n_audio.std())
return n_audio
def pad_audio(signal):
    # copy into a list so both Python lists and numpy arrays can be padded
    final_signal = list(signal)
n = len(signal)
target_length = 16000
to_add = target_length - n
for i in range(to_add):
if i <= to_add//2:
final_signal.insert(0,0)
else:
final_signal.append(0)
ret = np.array(final_signal,dtype = np.int16)
return ret
def encode_label(x,y,z):
encode = []
for elem in y[:len(y)-1] :
if x == elem :
encode.append(1)
else:
encode.append(0)
if x in z:
encode.append(1)
else:
encode.append(0)
return encode
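# Small illustration of the label encoding above, wrapped in a function so importing
# this module stays side-effect free. With the lists defined at the top of the file,
# a known keyword sets one slot among the first 21 labels and leaves the trailing
# "unknown" slot at 0, while a word from unknown_labels sets only that trailing slot.
def _encode_label_examples():
    known = encode_label("yes", labels, unknown_labels)      # 1 at index 0, 21 zeros after it
    unknown = encode_label("bed", labels, unknown_labels)    # 21 zeros, then a trailing 1
    return known, unknown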
# max usable = 2062 files per label
def get_max_usable(labels):
usable = 9999999
l = labels[0:10]
for label in l :
all_files = os.listdir(label)
count = 0
print(label)
for file in all_files:
path = os.path.join(label,file)
sample_rate, audio = wavfile.read(path)
if len(audio)==16000:
count +=1
if count < usable :
usable = count
print(usable)
def preprocess_live_data_combined(signal,sample_rate):
length_signal = len(signal)
# if length_signal > 16000:
# best_chunk = []
# best_sum = 0
# for k in range(0,length_signal):
# sub = signal[k:k+16000]
# sum_sub = np.sum(abs(sub))
# if sum_sub > best_sum:
# best_sum = sum_sub
# best_chunk = sub
# # plt.plot(range(0,16000),best_chunk)
# # plt.show()
# signal = best_chunk
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
    mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
    mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
model_mfcc = load_model('models/cnn_mfcc_20epochs_50batchsize.h5')
prediction_mfcc = model_mfcc.predict(np.array([mfcc_feat]))
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
    ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
    ssc_feat = np.divide(ssc_feat, ssc_feat.std())
model_ssc = load_model('models/cnn_ssc_20epochs_50batchsize.h5')
prediction_ssc = model_ssc.predict(np.array([ssc_feat]))
return np.concatenate((prediction_mfcc,prediction_ssc))
def preprocess_live_data(signal,sample_rate):
length_signal = len(signal)
# if length_signal > 16000:
# best_chunk = []
# best_sum = 0
# for k in range(0,length_signal):
# sub = signal[k:k+16000]
# sum_sub = np.sum(abs(sub))
# if sum_sub > best_sum:
# best_sum = sum_sub
# best_chunk = sub
# # plt.plot(range(0,16000),best_chunk)
# # plt.show()
# signal = best_chunk
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
    mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
    mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
# ssc_feat = ssc(signal,sample_rate,winlen=0.03)
# np.subtract(ssc_feat,np.mean(ssc_feat))
# np.divide(ssc_feat,ssc_feat.std())
return mfcc_feat
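# Usage sketch (illustrative): run a single 16 kHz recording through the live MFCC
# preprocessing and a previously trained model. The wav and model filenames are
# hypothetical; PATH_MODELS is defined at the top of this file.
#
#     sample_rate, signal = wavfile.read("recording.wav")
#     feat = preprocess_live_data(signal, sample_rate)
#     model = load_model(PATH_MODELS + "cnn_mfcc_20epochs_50batchsize.h5")
#     prediction = model.predict(np.array([feat]))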
def count_down():
start = time.time()
while(time.time() - start < 3):
count = time.time()-start
if count.is_integer():
print(str(int(count))+' ! \n')
def make_training_list(labels,unknown_labels,training_size):
all_labels = labels[:len(labels)-2]
test_files = np.loadtxt(PATH_LIST + 'testing_list.txt', dtype=str)
validation_files = np.loadtxt(PATH_LIST + 'validation_list.txt', dtype=str)
training_files = []
total = 0
if training_size == 'all':
max_k = 2060
max_un = 1000
name = 'training_list_all.txt'
else:
max_k = 100
max_un = 10
name= 'training_list_small.txt'
with open(PATH_DATA + name,'w') as f:
for label in sorted(all_labels):
all_files = os.listdir(label)
count = 0
for file in all_files:
path = label+'/'+file
sample_rate, audio = wavfile.read(path)
if not file in validation_files and not file in test_files and len(audio) == 16000 and count < max_k:
f.write(path + '\n')
count += 1
total += 1
print(count)
for label in sorted(unknown_labels):
all_files = os.listdir(label)
count = 0
for file in all_files:
path = label+'/'+file
sample_rate, audio = wavfile.read(path)
if not file in validation_files and not file in test_files and len(audio) == 16000 and count < max_un:
f.write(path + '\n')
count += 1
total += 1
print(count)
print(total)
def make_validation_list(labels,unknown_labels,training_size):
all_labels = labels[:len(labels)-2]
validation_files = np.loadtxt(PATH_LIST + 'validation_list.txt', dtype=str)
training_files = []
count = {}
total = 0
if training_size == 'all':
max_k = 200
max_un = 100
name = 'validation_list_all.txt'
else:
max_k = 20
max_un = 2
name = 'validation_list_small.txt'
with open(PATH_LIST + name,'w') as f:
for file in validation_files:
label = file.split("/")[0]
if not label in count:
count[label] = 0
sample_rate, audio = wavfile.read(file)
if label in unknown_labels:
max_label = max_un
else:
max_label = max_k
if len(audio) == 16000 and count[label] < max_label:
f.write(file + '\n')
count[label] += 1
total += 1
print(count)
print(total)
def get_training_data(training_size,labels,unknown_labels,use_cut, use_raw, use_mfcc, use_ssc, add_silence, data_augmentation,add_noise):
if training_size == 'all':
training_files = np.loadtxt(PATH_LIST + 'training_list_all.txt', dtype=str)
validation_files = np.loadtxt(PATH_LIST + 'validation_list_all.txt', dtype=str)
else:
training_files = np.loadtxt(PATH_LIST + 'training_list_small.txt', dtype=str)
validation_files = np.loadtxt(PATH_LIST + 'validation_list_small.txt', dtype=str)
if not add_silence:
labels = labels[0:20]+[labels[-1]]
training_data = {'raw':[],'mfcc':[],'ssc':[]}
validation_data = {'raw':[],'mfcc':[],'ssc':[]}
noises = []
for file in os.listdir(PATH_DATA + '_background_noise_'):
if 'wav' in file:
path = os.path.join(PATH_DATA + '_background_noise_',file)
sample_rate, audio = wavfile.read(path)
noises.append(audio)
first = {}
rescale_l = []
for file in training_files:
label = file.split('/')[0]
name = file.split('/')[1]
if not label in first:
first[label] = True
print('Processing label for training: ',label)
audio_for_average = np.array([])
if use_cut and name in os.listdir(label+'_cut'):
path = label + '_cut/'+ name
sample_rate, audio = wavfile.read(path)
else:
path = PATH_DATA + file
sample_rate, audio = wavfile.read(path)
        if len(audio) < 16000:
audio = pad_audio(audio)
if add_noise:
noise_type = random.randint(0, len(noises)-1)
noise = noises[noise_type]
window = random.randint(0, len(noise)-16000-1)
noise_signal = coef_noise*noise[window:window+16000]
audio = audio + noise_signal
audio_to_process = [audio]
if data_augmentation:
if audio_for_average.size != 0:
average = (audio_for_average + audio)/2
audio_to_process.append(average)
data_to_add = {'raw':[],'mfcc':[],'ssc':[]}
for signal in audio_to_process:
if use_raw :
normalized_signal = normalize_audio(signal)
data_to_add['raw'].append(normalized_signal)
if use_mfcc :
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
                mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
                mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
data_to_add['mfcc'].append(mfcc_feat)
if use_ssc:
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
                ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
                ssc_feat = np.divide(ssc_feat, ssc_feat.std())
data_to_add['ssc'].append(ssc_feat)
for data_type , data in data_to_add.items():
for elem_data in data:
training_data[data_type].append((elem_data,encode_label(label,labels,unknown_labels)))
first = {}
for file in validation_files:
        label = file.split("/")[0]
        name = file.split("/")[1]
if not label in first:
first[label] = True
print('Processing label for validation: ',label)
audio_for_average = np.array([])
if use_cut and name in os.listdir(label+'_cut'):
path = label + '_cut/'+ name
sample_rate, audio = wavfile.read(path)
else:
path = PATH_DATA + file
sample_rate, audio = wavfile.read(path)
        if len(audio) < 16000:
audio = pad_audio(audio)
if add_noise:
noise_type = random.randint(0, len(noises)-1)
noise = noises[noise_type]
window = random.randint(0, len(noise)-16000-1)
noise_signal = coef_noise*noise[window:window+16000]
audio = audio + noise_signal
audio_to_process = [audio]
data_to_add = {'raw':[],'mfcc':[],'ssc':[]}
for signal in audio_to_process:
if use_raw :
normalized_signal = normalize_audio(signal)
data_to_add['raw'].append(normalized_signal)
if use_mfcc :
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
                mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
                mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
data_to_add['mfcc'].append(mfcc_feat)
if use_ssc:
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
                ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
                ssc_feat = np.divide(ssc_feat, ssc_feat.std())
data_to_add['ssc'].append(ssc_feat)
for data_type , data in data_to_add.items():
count_val = 0
for elem_data in data:
validation_data[data_type].append((elem_data,encode_label(label,labels,unknown_labels)))
count_val +=1
if add_silence:
if training_size == 'all':
nb_silence_to_add_t = 2060
nb_silence_to_add_v = 206
else:
nb_silence_to_add_t = 200
nb_silence_to_add_v = 20
for i in range(nb_silence_to_add_t):
silence_type = random.randint(0, len(noises)-1)
noise = noises[silence_type]
window = random.randint(0, len(noise)-16000-1)
coef_silence = random.random()
signal = coef_silence*noise[window:window+16000]
data_to_add = {'raw':[],'mfcc':[],'ssc':[]}
if use_raw :
normalized_signal = normalize_audio(signal)
data_to_add['raw'].append(normalized_signal)
if use_mfcc :
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
                mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
                mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
data_to_add['mfcc'].append(mfcc_feat)
if use_ssc:
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
                ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
                ssc_feat = np.divide(ssc_feat, ssc_feat.std())
data_to_add['ssc'].append(ssc_feat)
for data_type , data in data_to_add.items():
for elem_data in data:
training_data[data_type].append((elem_data,encode_label('silence',labels,unknown_labels)))
for i in range(nb_silence_to_add_v):
silence_type = random.randint(0, len(noises)-1)
noise = noises[silence_type]
window = random.randint(0, len(noise)-16000-1)
coef_silence = random.random()
signal = coef_silence*noise[window:window+16000]
data_to_add = {'raw':[],'mfcc':[],'ssc':[]}
if use_raw :
normalized_signal = normalize_audio(signal)
data_to_add['raw'].append(normalized_signal)
if use_mfcc :
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
                mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
                mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
data_to_add['mfcc'].append(mfcc_feat)
if use_ssc:
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
                ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
                ssc_feat = np.divide(ssc_feat, ssc_feat.std())
data_to_add['ssc'].append(ssc_feat)
for data_type , data in data_to_add.items():
for elem_data in data:
validation_data[data_type].append((elem_data,encode_label('silence',labels,unknown_labels)))
training_data_set = {'raw':[],'mfcc':[],'ssc':[]}
training_data_label = {'raw':[],'mfcc':[],'ssc':[]}
validation_data_set = {'raw':[],'mfcc':[],'ssc':[]}
validation_data_label = {'raw':[],'mfcc':[],'ssc':[]}
for data_type, data in training_data.items():
np.random.shuffle(data)
for elem in data:
training_data_set[data_type].append(elem[0].tolist())
training_data_label[data_type].append(elem[1])
training_data_set[data_type] = np.array(training_data_set[data_type])
training_data_label[data_type] = np.array(training_data_label[data_type])
for data_type, data in validation_data.items():
np.random.shuffle(data)
for elem in data:
validation_data_set[data_type].append(elem[0].tolist())
validation_data_label[data_type].append(elem[1])
validation_data_set[data_type] = np.array(validation_data_set[data_type])
validation_data_label[data_type] = np.array(validation_data_label[data_type])
return training_data_set , training_data_label , validation_data_set, validation_data_label
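# Usage sketch (illustrative): build MFCC-only training and validation sets from the
# small file lists, without silence, noise or augmentation. The returned dicts are
# keyed by feature type ('raw', 'mfcc', 'ssc').
#
#     x_train, y_train, x_val, y_val = get_training_data(
#         training_size="small", labels=labels, unknown_labels=unknown_labels,
#         use_cut=False, use_raw=False, use_mfcc=True, use_ssc=False,
#         add_silence=False, data_augmentation=False, add_noise=False)
#     print(x_train["mfcc"].shape, y_train["mfcc"].shape)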
def get_test_data(labels,unknown_labels,test_size, use_raw, use_mfcc, use_ssc, add_silence, data_augmentation,add_noise):
test_files = np.loadtxt(PATH_LIST+ 'testing_list.txt', dtype=str)
test_data = {'raw':[],'mfcc':[],'ssc':[]}
noises = []
for file in os.listdir(PATH_DATA+'_background_noise_'):
if 'wav' in file:
path = os.path.join(PATH_DATA+'_background_noise_',file)
sample_rate, audio = wavfile.read(path)
noises.append(audio)
first = {}
for file in test_files:
label = file.split('/')[0]
if not label in first:
first[label] = True
print('Processing label for test: ',label)
audio_for_average = np.array([])
path = PATH_DATA +file
sample_rate, audio = wavfile.read(path)
if add_noise:
noise_type = random.randint(0, len(noises)-1)
noise = noises[noise_type]
window = random.randint(0, len(noise)-16000-1)
noise_signal = coef_noise*noise[window:window+16000]
audio = audio + noise_signal
audio_to_process = [audio]
if len(audio) < 16000:
continue
if data_augmentation:
if audio_for_average.size != 0:
average = (audio_for_average + audio)/2
audio_to_process.append(average)
data_to_add = {'raw':[],'mfcc':[],'ssc':[]}
for signal in audio_to_process:
if use_raw :
normalized_signal = normalize_audio(signal)
data_to_add['raw'].append(normalized_signal)
if use_mfcc :
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
                mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
                mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
data_to_add['mfcc'].append(mfcc_feat)
if use_ssc:
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
                ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
                ssc_feat = np.divide(ssc_feat, ssc_feat.std())
data_to_add['ssc'].append(ssc_feat)
for data_type , data in data_to_add.items():
for elem_data in data:
test_data[data_type].append((elem_data,encode_label(label,labels,unknown_labels)))
if add_silence:
nb_silence_to_add_t = 200
for i in range(nb_silence_to_add_t):
silence_type = random.randint(0, len(noises)-1)
noise = noises[silence_type]
window = random.randint(0, len(noise)-16000-1)
coef_silence = random.random()
signal = coef_silence*noise[window:window+16000]
data_to_add = {'raw':[],'mfcc':[],'ssc':[]}
if use_raw :
normalized_signal = normalize_audio(signal)
data_to_add['raw'].append(normalized_signal)
if use_mfcc :
mfcc_feat = mfcc(signal,sample_rate,winlen=0.03)
                mfcc_feat = np.subtract(mfcc_feat, np.mean(mfcc_feat))
                mfcc_feat = np.divide(mfcc_feat, mfcc_feat.std())
data_to_add['mfcc'].append(mfcc_feat)
if use_ssc:
ssc_feat = ssc(signal,sample_rate,winlen=0.03)
                ssc_feat = np.subtract(ssc_feat, np.mean(ssc_feat))
                ssc_feat = np.divide(ssc_feat, ssc_feat.std())
data_to_add['ssc'].append(ssc_feat)
for data_type , data in data_to_add.items():
for elem_data in data:
test_data[data_type].append((elem_data,encode_label('silence',labels,unknown_labels)))
test_data_set = {'raw':[],'mfcc':[],'ssc':[]}
test_data_label = {'raw':[],'mfcc':[],'ssc':[]}
for data_type, data in test_data.items():
for elem in data:
test_data_set[data_type].append(elem[0].tolist())
test_data_label[data_type].append(elem[1])
test_data_set[data_type] = np.array(test_data_set[data_type])
test_data_label[data_type] = np.array(test_data_label[data_type])
return test_data_set , test_data_label
| 37.676951
| 141
| 0.55843
|
043311f3b67d002217d28e31319a91c53d7c252b
| 18,417
|
py
|
Python
|
rasa/nlu/training_data/training_data.py
|
harada4atsushi/rasa
|
c6de93d9b4cd8343529f8f54853b861bee695a0b
|
[
"Apache-2.0"
] | 1
|
2020-07-20T16:29:56.000Z
|
2020-07-20T16:29:56.000Z
|
rasa/nlu/training_data/training_data.py
|
DomAmato/rasa
|
b1bdd5faef79fb8de7022e614d5f58e85f505b4d
|
[
"Apache-2.0"
] | 14
|
2020-06-10T00:28:01.000Z
|
2020-10-28T01:04:36.000Z
|
rasa/nlu/training_data/training_data.py
|
DomAmato/rasa
|
b1bdd5faef79fb8de7022e614d5f58e85f505b4d
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import random
from collections import Counter, OrderedDict
from copy import deepcopy
from os.path import relpath
from typing import Any, Dict, List, Optional, Set, Text, Tuple, Callable
import rasa.nlu.utils
from rasa.utils.common import raise_warning, lazy_property
from rasa.nlu.constants import (
RESPONSE,
RESPONSE_KEY_ATTRIBUTE,
NO_ENTITY_TAG,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
INTENT,
ENTITIES,
)
from rasa.nlu.training_data.message import Message
from rasa.nlu.training_data.util import check_duplicate_synonym
from rasa.nlu.utils import list_to_str
DEFAULT_TRAINING_DATA_OUTPUT_PATH = "training_data.json"
logger = logging.getLogger(__name__)
class TrainingData:
"""Holds loaded intent and entity training data."""
# Validation will ensure and warn if these lower limits are not met
MIN_EXAMPLES_PER_INTENT = 2
MIN_EXAMPLES_PER_ENTITY = 2
def __init__(
self,
training_examples: Optional[List[Message]] = None,
entity_synonyms: Optional[Dict[Text, Text]] = None,
regex_features: Optional[List[Dict[Text, Text]]] = None,
lookup_tables: Optional[List[Dict[Text, Text]]] = None,
nlg_stories: Optional[Dict[Text, List[Text]]] = None,
) -> None:
if training_examples:
self.training_examples = self.sanitize_examples(training_examples)
else:
self.training_examples = []
self.entity_synonyms = entity_synonyms if entity_synonyms else {}
self.regex_features = regex_features if regex_features else []
self.sort_regex_features()
self.lookup_tables = lookup_tables if lookup_tables else []
self.nlg_stories = nlg_stories if nlg_stories else {}
def merge(self, *others: "TrainingData") -> "TrainingData":
"""Return merged instance of this data with other training data."""
training_examples = deepcopy(self.training_examples)
entity_synonyms = self.entity_synonyms.copy()
regex_features = deepcopy(self.regex_features)
lookup_tables = deepcopy(self.lookup_tables)
nlg_stories = deepcopy(self.nlg_stories)
others = [other for other in others if other]
for o in others:
training_examples.extend(deepcopy(o.training_examples))
regex_features.extend(deepcopy(o.regex_features))
lookup_tables.extend(deepcopy(o.lookup_tables))
for text, syn in o.entity_synonyms.items():
check_duplicate_synonym(
entity_synonyms, text, syn, "merging training data"
)
entity_synonyms.update(o.entity_synonyms)
nlg_stories.update(o.nlg_stories)
return TrainingData(
training_examples,
entity_synonyms,
regex_features,
lookup_tables,
nlg_stories,
)
def filter_training_examples(
self, condition: Callable[[Message], bool]
) -> "TrainingData":
"""Filter training examples.
Args:
condition: A function that will be applied to filter training examples.
Returns:
TrainingData: A TrainingData with filtered training examples.
"""
return TrainingData(
list(filter(condition, self.training_examples)),
self.entity_synonyms,
self.regex_features,
self.lookup_tables,
)
def filter_by_intent(self, intent: Text) -> "TrainingData":
"""Filter training examples."""
raise_warning(
"The `filter_by_intent` function is deprecated. "
"Please use `filter_training_examples` instead.",
DeprecationWarning,
stacklevel=2,
)
return self.filter_training_examples(lambda ex: intent == ex.get(INTENT))
def __hash__(self) -> int:
from rasa.core import utils as core_utils
stringified = self.nlu_as_json() + self.nlg_as_markdown()
text_hash = core_utils.get_text_hash(stringified)
return int(text_hash, 16)
@staticmethod
def sanitize_examples(examples: List[Message]) -> List[Message]:
"""Makes sure the training data is clean.
Remove trailing whitespaces from intent and response annotations and drop
duplicate examples.
"""
for ex in examples:
if ex.get(INTENT):
ex.set(INTENT, ex.get(INTENT).strip())
if ex.get(RESPONSE):
ex.set(RESPONSE, ex.get(RESPONSE).strip())
return list(OrderedDict.fromkeys(examples))
@lazy_property
def intent_examples(self) -> List[Message]:
return [ex for ex in self.training_examples if ex.get(INTENT)]
@lazy_property
def response_examples(self) -> List[Message]:
return [ex for ex in self.training_examples if ex.get(RESPONSE)]
@lazy_property
def entity_examples(self) -> List[Message]:
return [ex for ex in self.training_examples if ex.get(ENTITIES)]
@lazy_property
def intents(self) -> Set[Text]:
"""Returns the set of intents in the training data."""
return {ex.get(INTENT) for ex in self.training_examples} - {None}
@lazy_property
def responses(self) -> Set[Text]:
"""Returns the set of responses in the training data."""
return {ex.get(RESPONSE) for ex in self.training_examples} - {None}
@lazy_property
def retrieval_intents(self) -> Set[Text]:
"""Returns the total number of response types in the training data"""
return {
ex.get(INTENT)
for ex in self.training_examples
if ex.get(RESPONSE) is not None
}
@lazy_property
def examples_per_intent(self) -> Dict[Text, int]:
"""Calculates the number of examples per intent."""
intents = [ex.get(INTENT) for ex in self.training_examples]
return dict(Counter(intents))
@lazy_property
def examples_per_response(self) -> Dict[Text, int]:
"""Calculates the number of examples per response."""
return dict(Counter(self.responses))
@lazy_property
def entities(self) -> Set[Text]:
"""Returns the set of entity types in the training data."""
entity_types = [e.get(ENTITY_ATTRIBUTE_TYPE) for e in self.sorted_entities()]
return set(entity_types)
@lazy_property
def entity_roles(self) -> Set[Text]:
"""Returns the set of entity roles in the training data."""
entity_types = [
e.get(ENTITY_ATTRIBUTE_ROLE)
for e in self.sorted_entities()
if ENTITY_ATTRIBUTE_ROLE in e
]
return set(entity_types) - {NO_ENTITY_TAG}
@lazy_property
def entity_groups(self) -> Set[Text]:
"""Returns the set of entity groups in the training data."""
entity_types = [
e.get(ENTITY_ATTRIBUTE_GROUP)
for e in self.sorted_entities()
if ENTITY_ATTRIBUTE_GROUP in e
]
return set(entity_types) - {NO_ENTITY_TAG}
def entity_roles_groups_used(self) -> bool:
entity_groups_used = (
self.entity_groups is not None and len(self.entity_groups) > 0
)
entity_roles_used = self.entity_roles is not None and len(self.entity_roles) > 0
return entity_groups_used or entity_roles_used
@lazy_property
def examples_per_entity(self) -> Dict[Text, int]:
"""Calculates the number of examples per entity."""
entities = []
def _append_entity(entity: Dict[Text, Any], attribute: Text) -> None:
if attribute in entity:
_value = entity.get(attribute)
if _value is not None and _value != NO_ENTITY_TAG:
entities.append(f"{attribute} '{_value}'")
for entity in self.sorted_entities():
_append_entity(entity, ENTITY_ATTRIBUTE_TYPE)
_append_entity(entity, ENTITY_ATTRIBUTE_ROLE)
_append_entity(entity, ENTITY_ATTRIBUTE_GROUP)
return dict(Counter(entities))
def sort_regex_features(self) -> None:
"""Sorts regex features lexicographically by name+pattern"""
self.regex_features = sorted(
self.regex_features, key=lambda e: "{}+{}".format(e["name"], e["pattern"])
)
def fill_response_phrases(self) -> None:
"""Set response phrase for all examples by looking up NLG stories"""
for example in self.training_examples:
response_key = example.get(RESPONSE_KEY_ATTRIBUTE)
# if response_key is None, that means the corresponding intent is not a
# retrieval intent and hence no response text needs to be fetched.
# If response_key is set, fetch the corresponding response text
if response_key:
# look for corresponding bot utterance
story_lookup_intent = example.get_combined_intent_response_key()
assistant_utterances = self.nlg_stories.get(story_lookup_intent, [])
if assistant_utterances:
# selecting only first assistant utterance for now
example.set(RESPONSE, assistant_utterances[0])
else:
raise ValueError(
"No response phrases found for {}. Check training data "
"files for a possible wrong intent name in NLU/NLG file".format(
story_lookup_intent
)
)
def nlu_as_json(self, **kwargs: Any) -> Text:
"""Represent this set of training examples as json."""
from rasa.nlu.training_data.formats import ( # pytype: disable=pyi-error
RasaWriter,
)
return RasaWriter().dumps(self, **kwargs)
def as_json(self) -> Text:
raise_warning(
"Function 'as_json()' is deprecated and will be removed "
"in future versions. Use 'nlu_as_json()' instead.",
DeprecationWarning,
)
return self.nlu_as_json()
def nlg_as_markdown(self) -> Text:
"""Generates the markdown representation of the response phrases(NLG) of
TrainingData."""
from rasa.nlu.training_data.formats import ( # pytype: disable=pyi-error
NLGMarkdownWriter,
)
return NLGMarkdownWriter().dumps(self)
def nlu_as_markdown(self) -> Text:
"""Generates the markdown representation of the NLU part of TrainingData."""
from rasa.nlu.training_data.formats import ( # pytype: disable=pyi-error
MarkdownWriter,
)
return MarkdownWriter().dumps(self)
def as_markdown(self) -> Text:
raise_warning(
"Function 'as_markdown()' is deprecated and will be removed "
"in future versions. Use 'nlu_as_markdown()' and 'nlg_as_markdown()' "
"instead.",
DeprecationWarning,
)
return self.nlu_as_markdown()
def persist_nlu(self, filename: Text = DEFAULT_TRAINING_DATA_OUTPUT_PATH):
if filename.endswith("json"):
rasa.nlu.utils.write_to_file(filename, self.nlu_as_json(indent=2))
elif filename.endswith("md"):
rasa.nlu.utils.write_to_file(filename, self.nlu_as_markdown())
else:
            raise ValueError(
"Unsupported file format detected. Supported file formats are 'json' "
"and 'md'."
)
def persist_nlg(self, filename: Text) -> None:
nlg_serialized_data = self.nlg_as_markdown()
if nlg_serialized_data == "":
return
rasa.nlu.utils.write_to_file(filename, self.nlg_as_markdown())
@staticmethod
def get_nlg_persist_filename(nlu_filename: Text) -> Text:
# Add nlg_ as prefix and change extension to .md
filename = os.path.join(
os.path.dirname(nlu_filename),
"nlg_" + os.path.splitext(os.path.basename(nlu_filename))[0] + ".md",
)
return filename
def persist(
self, dir_name: Text, filename: Text = DEFAULT_TRAINING_DATA_OUTPUT_PATH
) -> Dict[Text, Any]:
"""Persists this training data to disk and returns necessary
information to load it again."""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
nlu_data_file = os.path.join(dir_name, filename)
self.persist_nlu(nlu_data_file)
self.persist_nlg(self.get_nlg_persist_filename(nlu_data_file))
return {"training_data": relpath(nlu_data_file, dir_name)}
def sorted_entities(self) -> List[Any]:
"""Extract all entities from examples and sorts them by entity type."""
entity_examples = [
entity for ex in self.entity_examples for entity in ex.get("entities")
]
return sorted(entity_examples, key=lambda e: e["entity"])
def sorted_intent_examples(self) -> List[Message]:
"""Sorts the intent examples by the name of the intent and then response"""
return sorted(
self.intent_examples, key=lambda e: (e.get(INTENT), e.get(RESPONSE))
)
def validate(self) -> None:
"""Ensures that the loaded training data is valid.
Checks that the data has a minimum of certain training examples."""
logger.debug("Validating training data...")
if "" in self.intents:
raise_warning(
"Found empty intent, please check your "
"training data. This may result in wrong "
"intent predictions."
)
if "" in self.responses:
raise_warning(
"Found empty response, please check your "
"training data. This may result in wrong "
"response predictions."
)
# emit warnings for intents with only a few training samples
for intent, count in self.examples_per_intent.items():
if count < self.MIN_EXAMPLES_PER_INTENT:
raise_warning(
f"Intent '{intent}' has only {count} training examples! "
f"Minimum is {self.MIN_EXAMPLES_PER_INTENT}, training may fail."
)
# emit warnings for entities with only a few training samples
for entity, count in self.examples_per_entity.items():
if count < self.MIN_EXAMPLES_PER_ENTITY:
raise_warning(
f"Entity {entity} has only {count} training examples! "
f"The minimum is {self.MIN_EXAMPLES_PER_ENTITY}, because of "
f"this the training may fail."
)
def train_test_split(
self, train_frac: float = 0.8, random_seed: Optional[int] = None
) -> Tuple["TrainingData", "TrainingData"]:
"""Split into a training and test dataset,
preserving the fraction of examples per intent."""
# collect all nlu data
test, train = self.split_nlu_examples(train_frac, random_seed)
# collect all nlg stories
test_nlg_stories, train_nlg_stories = self.split_nlg_responses(test, train)
data_train = TrainingData(
train,
entity_synonyms=self.entity_synonyms,
regex_features=self.regex_features,
lookup_tables=self.lookup_tables,
nlg_stories=train_nlg_stories,
)
data_train.fill_response_phrases()
data_test = TrainingData(
test,
entity_synonyms=self.entity_synonyms,
regex_features=self.regex_features,
lookup_tables=self.lookup_tables,
nlg_stories=test_nlg_stories,
)
data_test.fill_response_phrases()
return data_train, data_test
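    # Usage sketch (illustrative, not part of the class): split a small data set while
    # preserving the per-intent ratio. Message.build is the usual helper for creating
    # examples; the intent names here are hypothetical.
    #
    #     from rasa.nlu.training_data import TrainingData
    #     from rasa.nlu.training_data.message import Message
    #
    #     examples = [Message.build(text=f"hi {i}", intent="greet") for i in range(10)]
    #     examples += [Message.build(text=f"bye {i}", intent="goodbye") for i in range(10)]
    #     data = TrainingData(training_examples=examples)
    #     train, test = data.train_test_split(train_frac=0.8, random_seed=42)
    #     assert len(train.intent_examples) == 16 and len(test.intent_examples) == 4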
def split_nlg_responses(
self, test, train
) -> Tuple[Dict[Text, list], Dict[Text, list]]:
train_nlg_stories = self.build_nlg_stories_from_examples(train)
test_nlg_stories = self.build_nlg_stories_from_examples(test)
return test_nlg_stories, train_nlg_stories
@staticmethod
def build_nlg_stories_from_examples(examples) -> Dict[Text, list]:
nlg_stories = {}
for ex in examples:
if ex.get(RESPONSE_KEY_ATTRIBUTE) and ex.get(RESPONSE):
nlg_stories[ex.get_combined_intent_response_key()] = [ex.get(RESPONSE)]
return nlg_stories
def split_nlu_examples(
self, train_frac: float, random_seed: Optional[int] = None
) -> Tuple[list, list]:
train, test = [], []
for intent, count in self.examples_per_intent.items():
ex = [e for e in self.intent_examples if e.data[INTENT] == intent]
if random_seed is not None:
random.Random(random_seed).shuffle(ex)
else:
random.shuffle(ex)
n_train = int(count * train_frac)
train.extend(ex[:n_train])
test.extend(ex[n_train:])
return test, train
def print_stats(self) -> None:
logger.info("Training data stats:")
logger.info(
f"Number of intent examples: {len(self.intent_examples)} "
f"({len(self.intents)} distinct intents)"
)
if self.intents:
logger.info(f" Found intents: {list_to_str(self.intents)}")
logger.info(
f"Number of response examples: {len(self.response_examples)} "
f"({len(self.responses)} distinct responses)"
)
logger.info(
f"Number of entity examples: {len(self.entity_examples)} "
f"({len(self.entities)} distinct entities)"
)
if self.entities:
logger.info(f" Found entity types: {list_to_str(self.entities)}")
if self.entity_roles:
logger.info(f" Found entity roles: {list_to_str(self.entity_roles)}")
if self.entity_groups:
logger.info(f" Found entity groups: {list_to_str(self.entity_groups)}")
def is_empty(self) -> bool:
"""Checks if any training data was loaded."""
lists_to_check = [
self.training_examples,
self.entity_synonyms,
self.regex_features,
self.lookup_tables,
]
return not any([len(l) > 0 for l in lists_to_check])
| 36.397233
| 88
| 0.623554
|
b6f6bd3b44cd15e9d9f8735511dfe400ee16b25f
| 1,048
|
py
|
Python
|
pipeline/handlers.py
|
sphoebs/rockshell
|
a28297b2a3837e63ac7970344a598e51b0c298bc
|
[
"Apache-2.0"
] | 1
|
2016-01-22T08:21:15.000Z
|
2016-01-22T08:21:15.000Z
|
src/lib/pipeline/handlers.py
|
Eforcers/inbox-cleaner
|
bbb516606cff4142ef355691d137e9ac4d2192e3
|
[
"BSD-2-Clause",
"MIT",
"MIT-0",
"CC-BY-3.0",
"BSD-3-Clause"
] | 2
|
2019-01-13T22:47:08.000Z
|
2019-02-03T08:02:57.000Z
|
src/pipeline/handlers.py
|
svn2github/appengine-pipeline
|
103c2125aca95676faf8cf567355ce03d1d11466
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web request dispatcher for the Google App Engine Pipeline API.
In a separate file from the core pipeline module to break circular dependencies.
"""
import logging
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util as webapp_util
import pipeline
_APP = webapp.WSGIApplication(pipeline.create_handlers_map(), debug=True)
def _main():
webapp_util.run_wsgi_app(_APP)
if __name__ == '__main__':
_main()
| 26.871795
| 80
| 0.765267
|
39f3714dc821406515f9a39781bb71324d5e9b2e
| 703
|
py
|
Python
|
src/convert_presidents.py
|
sjml/incumbency
|
3fe98ad7dae54857304a90c2e5a7d6be9cf9b32c
|
[
"MIT"
] | 1
|
2020-03-02T00:38:42.000Z
|
2020-03-02T00:38:42.000Z
|
src/convert_presidents.py
|
sjml/incumbency
|
3fe98ad7dae54857304a90c2e5a7d6be9cf9b32c
|
[
"MIT"
] | null | null | null |
src/convert_presidents.py
|
sjml/incumbency
|
3fe98ad7dae54857304a90c2e5a7d6be9cf9b32c
|
[
"MIT"
] | null | null | null |
import os
import shutil
import subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GOV_PATH = "../data/raw/david_leip/Pres Election Data (xls)"
OUTPUT_PATH = "../data/interim/president_data"
# requires libreoffice to be installed :-/
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH)
for f in os.listdir(GOV_PATH):
if f.endswith(".xlsx"):
shutil.copyfile(os.path.join(GOV_PATH, f), os.path.join(OUTPUT_PATH, f))
elif f.endswith(".xls"):
subprocess.run([
"soffice",
"--headless",
"--convert-to",
"xlsx",
os.path.join(GOV_PATH, f),
"--outdir",
OUTPUT_PATH
])
| 27.038462
| 80
| 0.603129
|
949ab73941c65a3e0c22814704c72cce9be8b42b
| 3,932
|
py
|
Python
|
test/functional/test_framework/blocktools.py
|
gdrcoin/gdrcoin
|
f9f2137b3d9069bfc8e3c69c90a684a061dfb6aa
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/blocktools.py
|
gdrcoin/gdrcoin
|
f9f2137b3d9069bfc8e3c69c90a684a061dfb6aa
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/blocktools.py
|
gdrcoin/gdrcoin
|
f9f2137b3d9069bfc8e3c69c90a684a061dfb6aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Gdrcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .mininode import *
from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
# According to BIP141, blocks with witness rules active must commit to the
# hash of all in-block transactions including witness.
def add_witness_commitment(block, nonce=0):
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
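# A few spot checks of the minimal CScript number encoding above (illustrative; this
# helper is not called at import time, only when the function is invoked).
def _serialize_script_num_examples():
    assert serialize_script_num(0) == bytearray(b'')             # zero encodes as empty
    assert serialize_script_num(1) == bytearray(b'\x01')
    assert serialize_script_num(128) == bytearray(b'\x80\x00')   # pad byte keeps the sign bit clear
    assert serialize_script_num(-1) == bytearray(b'\x81')        # sign bit set on the top byte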
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height/150) # regtest
coinbaseoutput.nValue >>= halvings
    if pubkey is not None:
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
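# Usage sketch (illustrative): chain the helpers above to build and submit a regtest
# block on top of a known tip. tip_hash/tip_height/tip_time and the node handle are
# hypothetical values taken from a running test framework node.
#
#     coinbase = create_coinbase(tip_height + 1)
#     block = create_block(int(tip_hash, 16), coinbase, nTime=tip_time + 1)
#     block.solve()                  # grind nNonce until the regtest target is met
#     node.submitblock(ToHex(block))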
# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
| 35.423423
| 104
| 0.709054
|
362ac1585d7b67c60682d7ddd8a07f7568a39cf2
| 7,823
|
py
|
Python
|
breaker_audio/component_cmn/encoder/inference.py
|
kozzion/breaker_audio
|
0f27b3ae581fbeb8f79d0b8755a139f7438ca02b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
breaker_audio/component_cmn/encoder/inference.py
|
kozzion/breaker_audio
|
0f27b3ae581fbeb8f79d0b8755a139f7438ca02b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
breaker_audio/component_cmn/encoder/inference.py
|
kozzion/breaker_audio
|
0f27b3ae581fbeb8f79d0b8755a139f7438ca02b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from matplotlib import cm
from encoder import audio
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
from breaker_audio.component_cmn.encoder.params_data import *
from breaker_audio.component_cmn.encoder.model import SpeakerEncoder
_model = None # type: SpeakerEncoder
_device = torch.device('cpu')  # type: torch.device
def load_model(weights_fpath: Path, device=None):
"""
Loads the model in memory. If this function is not explicitely called, it will be run on the
first call to embed_frames() with the default weights file.
:param weights_fpath: the path to saved model weights.
:param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The
model will be loaded and will run on this device. Outputs will however always be on the cpu.
If None, will default to your GPU if it"s available, otherwise your CPU.
"""
# TODO: I think the slow loading of the encoder might have something to do with the device it
# was saved on. Worth investigating.
global _model, _device
if device is None:
_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
elif isinstance(device, str):
_device = torch.device(device)
_model = SpeakerEncoder(_device, torch.device("cpu"))
checkpoint = torch.load(weights_fpath, map_location=_device.type)
_model.load_state_dict(checkpoint["model_state"])
_model.eval()
print("Loaded encoder \"%s\" trained to step %d" % (weights_fpath.name, checkpoint["step"]))
def is_loaded():
return _model is not None
def embed_frames_batch(frames_batch):
"""
Computes embeddings for a batch of mel spectrogram.
:param frames_batch: a batch mel of spectrogram as a numpy array of float32 of shape
(batch_size, n_frames, n_channels)
:return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size)
"""
if _model is None:
raise Exception("Model was not loaded. Call load_model() before inference.")
frames = torch.from_numpy(frames_batch).to(_device)
embed = _model.forward(frames).detach().cpu().numpy()
return embed
def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames,
min_pad_coverage=0.75, overlap=0.5):
"""
Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain
partial utterances of <partial_utterance_n_frames> each. Both the waveform and the mel
spectrogram slices are returned, so as to make each partial utterance waveform correspond to
its spectrogram. This function assumes that the mel spectrogram parameters used are those
defined in params_data.py.
The returned ranges may be indexing further than the length of the waveform. It is
recommended that you pad the waveform with zeros up to wave_slices[-1].stop.
:param n_samples: the number of samples in the waveform
:param partial_utterance_n_frames: the number of mel spectrogram frames in each partial
utterance
:param min_pad_coverage: when reaching the last partial utterance, it may or may not have
enough frames. If at least <min_pad_coverage> of <partial_utterance_n_frames> are present,
then the last partial utterance will be considered, as if we padded the audio. Otherwise,
it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial
utterance, this parameter is ignored so that the function always returns at least 1 slice.
:param overlap: by how much the partial utterance should overlap. If set to 0, the partial
utterances are entirely disjoint.
:return: the waveform slices and mel spectrogram slices as lists of array slices. Index
respectively the waveform and the mel spectrogram with these slices to obtain the partial
utterances.
"""
assert 0 <= overlap < 1
assert 0 < min_pad_coverage <= 1
samples_per_frame = int((sampling_rate * mel_window_step / 1000))
n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)
# Compute the slices
wav_slices, mel_slices = [], []
steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)
for i in range(0, steps, frame_step):
mel_range = np.array([i, i + partial_utterance_n_frames])
wav_range = mel_range * samples_per_frame
mel_slices.append(slice(*mel_range))
wav_slices.append(slice(*wav_range))
# Evaluate whether extra padding is warranted or not
last_wav_range = wav_slices[-1]
coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
if coverage < min_pad_coverage and len(mel_slices) > 1:
mel_slices = mel_slices[:-1]
wav_slices = wav_slices[:-1]
return wav_slices, mel_slices
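# Worked example (illustrative; the exact constants live in params_data.py and may
# differ): with a 16 kHz sampling rate, a 10 ms mel window step and 160-frame partials,
# samples_per_frame is 160. A 2 s waveform (32000 samples) gives 201 mel frames and,
# with the default 50% overlap (frame_step 80), two partial slices starting at frames
# 0 and 80; the second one reaches frame 240, which is why embed_utterance pads the
# waveform up to wave_slices[-1].stop before slicing.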
def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs):
"""
Computes an embedding for a single utterance.
# TODO: handle multiple wavs to benefit from batching on GPU
:param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32
:param using_partials: if True, then the utterance is split in partial utterances of
<partial_utterance_n_frames> frames and the utterance embedding is computed from their
normalized average. If False, the utterance is instead computed from feeding the entire
spectogram to the network.
:param return_partials: if True, the partial embeddings will also be returned along with the
wav slices that correspond to the partial embeddings.
:param kwargs: additional arguments to compute_partial_splits()
:return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
<return_partials> is True, the partial utterances as a numpy array of float32 of shape
(n_partials, model_embedding_size) and the wav partials as a list of slices will also be
returned. If <using_partials> is simultaneously set to False, both these values will be None
instead.
"""
# Process the entire utterance if not using partials
if not using_partials:
frames = audio.wav_to_mel_spectrogram(wav)
embed = embed_frames_batch(frames[None, ...])[0]
if return_partials:
return embed, None, None
return embed
# Compute where to split the utterance into partials and pad if necessary
wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs)
max_wave_length = wave_slices[-1].stop
if max_wave_length >= len(wav):
wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
# Split the utterance into partials
frames = audio.wav_to_mel_spectrogram(wav)
frames_batch = np.array([frames[s] for s in mel_slices])
partial_embeds = embed_frames_batch(frames_batch)
# Compute the utterance embedding from the partial embeddings
raw_embed = np.mean(partial_embeds, axis=0)
embed = raw_embed / np.linalg.norm(raw_embed, 2)
if return_partials:
return embed, partial_embeds, wave_slices
return embed
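# Usage sketch (illustrative): load the encoder weights once, then embed a waveform
# that has already been preprocessed (resampled/trimmed) by the accompanying audio
# module. The weights path is hypothetical.
#
#     load_model(Path("saved_models/encoder.pt"))
#     embedding = embed_utterance(wav)                        # shape: (model_embedding_size,)
#     emb, partial_embeds, wav_slices = embed_utterance(wav, return_partials=True)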
def embed_speaker(wavs, **kwargs):
    raise NotImplementedError()
def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)):
if ax is None:
ax = plt.gca()
if shape is None:
height = int(np.sqrt(len(embed)))
shape = (height, -1)
embed = embed.reshape(shape)
cmap = cm.get_cmap()
mappable = ax.imshow(embed, cmap=cmap)
cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04)
cbar.set_clim(*color_range)
    ax.set_xticks([])
    ax.set_yticks([])
ax.set_title(title)
| 43.949438
| 99
| 0.717755
|
3457c9f693ad3ba247172250a438460307dbac95
| 95
|
py
|
Python
|
lightautoml/ml_algo/tuning/gpu/__init__.py
|
Rishat-skoltech/LightAutoML_GPU
|
4a0a524dc097de94b90871e40f2e33159a0e19b5
|
[
"Apache-2.0"
] | 2
|
2022-03-21T19:15:53.000Z
|
2022-03-30T07:41:59.000Z
|
lightautoml/ml_algo/tuning/gpu/__init__.py
|
Rishat-skoltech/LightAutoML_GPU
|
4a0a524dc097de94b90871e40f2e33159a0e19b5
|
[
"Apache-2.0"
] | null | null | null |
lightautoml/ml_algo/tuning/gpu/__init__.py
|
Rishat-skoltech/LightAutoML_GPU
|
4a0a524dc097de94b90871e40f2e33159a0e19b5
|
[
"Apache-2.0"
] | null | null | null |
"""Presets for end-to-end model training for special tasks."""
__all__ = [
"optuna_gpu"
]
| 15.833333
| 62
| 0.663158
|
330c17b251aef3e57d0e3ce711b7f648c88624cc
| 1,373
|
py
|
Python
|
gmaps_local/sample_result.py
|
asemic-horizon/gmaps_local
|
df56d878df81d7787c8835ce77f010ca1f1c4b9a
|
[
"MIT"
] | null | null | null |
gmaps_local/sample_result.py
|
asemic-horizon/gmaps_local
|
df56d878df81d7787c8835ce77f010ca1f1c4b9a
|
[
"MIT"
] | null | null | null |
gmaps_local/sample_result.py
|
asemic-horizon/gmaps_local
|
df56d878df81d7787c8835ce77f010ca1f1c4b9a
|
[
"MIT"
] | null | null | null |
sample = [
{'address_components':
[{'long_name': '205',
'short_name': '205',
'types': ['street_number']},
{'long_name': 'Rua Marlo da Costa e Souza',
'short_name': 'R. Marlo da Costa e Souza',
'types': ['route']},
{'long_name': 'Barra da Tijuca',
'short_name': 'Barra da Tijuca',
'types': ['political', 'sublocality', 'sublocality_level_1']},
{'long_name': 'Rio de Janeiro', 'short_name': 'Rio de Janeiro',
     'types': ['administrative_area_level_2', 'political']},
    {'long_name': 'Rio de Janeiro', 'short_name': 'RJ',
     'types': ['administrative_area_level_1', 'political']},
    {'long_name': 'Brazil', 'short_name': 'BR',
     'types': ['country', 'political']},
    {'long_name': '22790-735', 'short_name': '22790-735',
     'types': ['postal_code']}],
  'formatted_address': 'R. Marlo da Costa e Souza, 205 - Barra da Tijuca, Rio de Janeiro - RJ, 22790-735, Brazil',
  'geometry': {'bounds': {'northeast': {'lat': -23.0079626, 'lng': -43.4347307},
                          'southwest': {'lat': -23.0097596, 'lng': -43.4370281}},
               'location': {'lat': -23.0084271, 'lng': -43.4354288},
               'location_type': 'GEOMETRIC_CENTER',
               'viewport': {'northeast': {'lat': -23.00751211970849, 'lng': -43.43453041970849},
                            'southwest': {'lat': -23.0102100802915, 'lng': -43.43722838029149}}},
  'place_id': 'ChIJoWBeCujcmwARWwGCodY6gH8',
  'types': ['premise']}]
| 98.071429
| 889
| 0.619811
|
86c3ff71d9d9ecac545eed9a8df959194624f0f6
| 398
|
py
|
Python
|
FIRMCORN/procedures/__init__.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
FIRMCORN/procedures/__init__.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
FIRMCORN/procedures/__init__.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
# from getenv import *
from memset import *
# from printf import *
from scanf import *
from strcpy import *
from sprintf import *
from strdup import *
from memmove import *
from memcpy import *
from free import *
from snprintf import *
from socket import *
from send import *
from connect import *
from fcntl import *
from getpid import *
from close import *
from open_ import *
from rand_ import *
| 20.947368
| 22
| 0.753769
|
06c1d6cc1755ef022aa78967d4b651e21fd66618
| 31,425
|
py
|
Python
|
PythonAndroid/youtube-dl/lib/python3.5/youtube_dl/swfinterp.py
|
jianglei12138/python-3.5.1
|
2d248ceba8aa4c14ee43e57ece99cc1a43fd22b7
|
[
"PSF-2.0"
] | 5
|
2016-04-25T16:26:07.000Z
|
2021-04-28T16:10:29.000Z
|
PythonAndroid/youtube-dl/lib/python3.5/youtube_dl/swfinterp.py
|
jianglei12138/python-3.5.1
|
2d248ceba8aa4c14ee43e57ece99cc1a43fd22b7
|
[
"PSF-2.0"
] | 5
|
2016-04-22T01:33:31.000Z
|
2016-08-04T15:33:19.000Z
|
PythonAndroid/youtube-dl/lib/python3.5/youtube_dl/swfinterp.py
|
jianglei12138/python-3.5.1
|
2d248ceba8aa4c14ee43e57ece99cc1a43fd22b7
|
[
"PSF-2.0"
] | 4
|
2016-04-26T15:27:38.000Z
|
2018-11-12T21:04:54.000Z
|
from __future__ import unicode_literals
import collections
import io
import zlib
from .compat import compat_str
from .utils import (
ExtractorError,
struct_unpack,
)
def _extract_tags(file_contents):
if file_contents[1:3] != b'WS':
raise ExtractorError(
'Not an SWF file; header is %r' % file_contents[:3])
if file_contents[:1] == b'C':
content = zlib.decompress(file_contents[8:])
else:
raise NotImplementedError(
'Unsupported compression format %r' %
file_contents[:1])
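    # Layout note: after the 8-byte header (signature, version, file length), the decompressed
    # body begins with a RECT frame size whose first 5 bits give the per-coordinate bit count,
    # followed by a 2-byte frame rate and a 2-byte frame count, after which the tag stream starts.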
# Determine number of bits in framesize rectangle
framesize_nbits = struct_unpack('!B', content[:1])[0] >> 3
framesize_len = (5 + 4 * framesize_nbits + 7) // 8
pos = framesize_len + 2 + 2
while pos < len(content):
header16 = struct_unpack('<H', content[pos:pos + 2])[0]
pos += 2
tag_code = header16 >> 6
tag_len = header16 & 0x3f
if tag_len == 0x3f:
tag_len = struct_unpack('<I', content[pos:pos + 4])[0]
pos += 4
assert pos + tag_len <= len(content), \
('Tag %d ends at %d+%d - that\'s longer than the file (%d)'
% (tag_code, pos, tag_len, len(content)))
yield (tag_code, content[pos:pos + tag_len])
pos += tag_len
class _AVMClass_Object(object):
def __init__(self, avm_class):
self.avm_class = avm_class
def __repr__(self):
return '%s#%x' % (self.avm_class.name, id(self))
class _ScopeDict(dict):
def __init__(self, avm_class):
super(_ScopeDict, self).__init__()
self.avm_class = avm_class
def __repr__(self):
return '%s__Scope(%s)' % (
self.avm_class.name,
super(_ScopeDict, self).__repr__())
class _AVMClass(object):
def __init__(self, name_idx, name, static_properties=None):
self.name_idx = name_idx
self.name = name
self.method_names = {}
self.method_idxs = {}
self.methods = {}
self.method_pyfunctions = {}
self.static_properties = static_properties if static_properties else {}
self.variables = _ScopeDict(self)
self.constants = {}
def make_object(self):
return _AVMClass_Object(self)
def __repr__(self):
return '_AVMClass(%s)' % (self.name)
def register_methods(self, methods):
self.method_names.update(methods.items())
self.method_idxs.update(dict(
(idx, name)
for name, idx in methods.items()))
class _Multiname(object):
def __init__(self, kind):
self.kind = kind
def __repr__(self):
return '[MULTINAME kind: 0x%x]' % self.kind
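# Variable-length integer as used in ABC ("u30"/"u32"): up to 5 bytes, least-significant
# 7-bit group first; the high bit of each byte signals that another byte follows.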
def _read_int(reader):
res = 0
shift = 0
for _ in range(5):
buf = reader.read(1)
assert len(buf) == 1
b = struct_unpack('<B', buf)[0]
res = res | ((b & 0x7f) << shift)
if b & 0x80 == 0:
break
shift += 7
return res
def _u30(reader):
res = _read_int(reader)
assert res & 0xf0000000 == 0
return res
_u32 = _read_int
def _s32(reader):
v = _read_int(reader)
if v & 0x80000000 != 0:
v = - ((v ^ 0xffffffff) + 1)
return v
def _s24(reader):
bs = reader.read(3)
assert len(bs) == 3
last_byte = b'\xff' if (ord(bs[2:3]) >= 0x80) else b'\x00'
return struct_unpack('<i', bs + last_byte)[0]
def _read_string(reader):
slen = _u30(reader)
resb = reader.read(slen)
assert len(resb) == slen
return resb.decode('utf-8')
def _read_bytes(count, reader):
assert count >= 0
resb = reader.read(count)
assert len(resb) == count
return resb
def _read_byte(reader):
resb = _read_bytes(1, reader=reader)
res = struct_unpack('<B', resb)[0]
return res
StringClass = _AVMClass('(no name idx)', 'String')
ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray')
TimerClass = _AVMClass('(no name idx)', 'Timer')
TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'})
_builtin_classes = {
StringClass.name: StringClass,
ByteArrayClass.name: ByteArrayClass,
TimerClass.name: TimerClass,
TimerEventClass.name: TimerEventClass,
}
class _Undefined(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __hash__(self):
return 0
def __str__(self):
return 'undefined'
__repr__ = __str__
undefined = _Undefined()
class SWFInterpreter(object):
def __init__(self, file_contents):
self._patched_functions = {
(TimerClass, 'addEventListener'): lambda params: undefined,
}
code_tag = next(tag
for tag_code, tag in _extract_tags(file_contents)
if tag_code == 82)
p = code_tag.index(b'\0', 4) + 1
code_reader = io.BytesIO(code_tag[p:])
# Parse ABC (AVM2 ByteCode)
# Define a couple convenience methods
u30 = lambda *args: _u30(*args, reader=code_reader)
s32 = lambda *args: _s32(*args, reader=code_reader)
u32 = lambda *args: _u32(*args, reader=code_reader)
read_bytes = lambda *args: _read_bytes(*args, reader=code_reader)
read_byte = lambda *args: _read_byte(*args, reader=code_reader)
# minor_version + major_version
read_bytes(2 + 2)
# Constant pool
int_count = u30()
self.constant_ints = [0]
for _c in range(1, int_count):
self.constant_ints.append(s32())
self.constant_uints = [0]
uint_count = u30()
for _c in range(1, uint_count):
self.constant_uints.append(u32())
double_count = u30()
read_bytes(max(0, (double_count - 1)) * 8)
string_count = u30()
self.constant_strings = ['']
for _c in range(1, string_count):
s = _read_string(code_reader)
self.constant_strings.append(s)
namespace_count = u30()
for _c in range(1, namespace_count):
read_bytes(1) # kind
u30() # name
ns_set_count = u30()
for _c in range(1, ns_set_count):
count = u30()
for _c2 in range(count):
u30()
multiname_count = u30()
MULTINAME_SIZES = {
0x07: 2, # QName
0x0d: 2, # QNameA
0x0f: 1, # RTQName
0x10: 1, # RTQNameA
0x11: 0, # RTQNameL
0x12: 0, # RTQNameLA
0x09: 2, # Multiname
0x0e: 2, # MultinameA
0x1b: 1, # MultinameL
0x1c: 1, # MultinameLA
}
self.multinames = ['']
for _c in range(1, multiname_count):
kind = u30()
assert kind in MULTINAME_SIZES, 'Invalid multiname kind %r' % kind
if kind == 0x07:
u30() # namespace_idx
name_idx = u30()
self.multinames.append(self.constant_strings[name_idx])
elif kind == 0x09:
name_idx = u30()
u30()
self.multinames.append(self.constant_strings[name_idx])
else:
self.multinames.append(_Multiname(kind))
for _c2 in range(MULTINAME_SIZES[kind]):
u30()
# Methods
method_count = u30()
MethodInfo = collections.namedtuple(
'MethodInfo',
['NEED_ARGUMENTS', 'NEED_REST'])
method_infos = []
for method_id in range(method_count):
param_count = u30()
u30() # return type
for _ in range(param_count):
u30() # param type
u30() # name index (always 0 for youtube)
flags = read_byte()
if flags & 0x08 != 0:
# Options present
option_count = u30()
for c in range(option_count):
u30() # val
read_bytes(1) # kind
if flags & 0x80 != 0:
# Param names present
for _ in range(param_count):
u30() # param name
mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0)
method_infos.append(mi)
# Metadata
metadata_count = u30()
for _c in range(metadata_count):
u30() # name
item_count = u30()
for _c2 in range(item_count):
u30() # key
u30() # value
def parse_traits_info():
trait_name_idx = u30()
kind_full = read_byte()
kind = kind_full & 0x0f
attrs = kind_full >> 4
methods = {}
constants = None
if kind == 0x00: # Slot
u30() # Slot id
u30() # type_name_idx
vindex = u30()
if vindex != 0:
read_byte() # vkind
elif kind == 0x06: # Const
u30() # Slot id
u30() # type_name_idx
vindex = u30()
vkind = 'any'
if vindex != 0:
vkind = read_byte()
if vkind == 0x03: # Constant_Int
value = self.constant_ints[vindex]
elif vkind == 0x04: # Constant_UInt
value = self.constant_uints[vindex]
else:
return {}, None # Ignore silently for now
constants = {self.multinames[trait_name_idx]: value}
elif kind in (0x01, 0x02, 0x03): # Method / Getter / Setter
u30() # disp_id
method_idx = u30()
methods[self.multinames[trait_name_idx]] = method_idx
elif kind == 0x04: # Class
u30() # slot_id
u30() # classi
elif kind == 0x05: # Function
u30() # slot_id
function_idx = u30()
methods[function_idx] = self.multinames[trait_name_idx]
else:
raise ExtractorError('Unsupported trait kind %d' % kind)
if attrs & 0x4 != 0: # Metadata present
metadata_count = u30()
for _c3 in range(metadata_count):
u30() # metadata index
return methods, constants
# Classes
class_count = u30()
classes = []
for class_id in range(class_count):
name_idx = u30()
cname = self.multinames[name_idx]
avm_class = _AVMClass(name_idx, cname)
classes.append(avm_class)
u30() # super_name idx
flags = read_byte()
if flags & 0x08 != 0: # Protected namespace is present
u30() # protected_ns_idx
intrf_count = u30()
for _c2 in range(intrf_count):
u30()
u30() # iinit
trait_count = u30()
for _c2 in range(trait_count):
trait_methods, trait_constants = parse_traits_info()
avm_class.register_methods(trait_methods)
if trait_constants:
avm_class.constants.update(trait_constants)
assert len(classes) == class_count
self._classes_by_name = dict((c.name, c) for c in classes)
for avm_class in classes:
avm_class.cinit_idx = u30()
trait_count = u30()
for _c2 in range(trait_count):
trait_methods, trait_constants = parse_traits_info()
avm_class.register_methods(trait_methods)
if trait_constants:
avm_class.constants.update(trait_constants)
# Scripts
script_count = u30()
for _c in range(script_count):
u30() # init
trait_count = u30()
for _c2 in range(trait_count):
parse_traits_info()
# Method bodies
method_body_count = u30()
Method = collections.namedtuple('Method', ['code', 'local_count'])
self._all_methods = []
for _c in range(method_body_count):
method_idx = u30()
u30() # max_stack
local_count = u30()
u30() # init_scope_depth
u30() # max_scope_depth
code_length = u30()
code = read_bytes(code_length)
m = Method(code, local_count)
self._all_methods.append(m)
for avm_class in classes:
if method_idx in avm_class.method_idxs:
avm_class.methods[avm_class.method_idxs[method_idx]] = m
exception_count = u30()
for _c2 in range(exception_count):
u30() # from
u30() # to
u30() # target
u30() # exc_type
u30() # var_name
trait_count = u30()
for _c2 in range(trait_count):
parse_traits_info()
assert p + code_reader.tell() == len(code_tag)
def patch_function(self, avm_class, func_name, f):
self._patched_functions[(avm_class, func_name)] = f
def extract_class(self, class_name, call_cinit=True):
try:
res = self._classes_by_name[class_name]
except KeyError:
raise ExtractorError('Class %r not found' % class_name)
if call_cinit and hasattr(res, 'cinit_idx'):
res.register_methods({'$cinit': res.cinit_idx})
res.methods['$cinit'] = self._all_methods[res.cinit_idx]
cinit = self.extract_function(res, '$cinit')
cinit([])
return res
def extract_function(self, avm_class, func_name):
p = self._patched_functions.get((avm_class, func_name))
if p:
return p
if func_name in avm_class.method_pyfunctions:
return avm_class.method_pyfunctions[func_name]
if func_name in self._classes_by_name:
return self._classes_by_name[func_name].make_object()
if func_name not in avm_class.methods:
raise ExtractorError('Cannot find function %s.%s' % (
avm_class.name, func_name))
m = avm_class.methods[func_name]
def resfunc(args):
# Helper functions
coder = io.BytesIO(m.code)
s24 = lambda: _s24(coder)
u30 = lambda: _u30(coder)
registers = [avm_class.variables] + list(args) + [None] * m.local_count
stack = []
scopes = collections.deque([
self._classes_by_name, avm_class.constants, avm_class.variables])
while True:
opcode = _read_byte(coder)
if opcode == 9: # label
pass # Spec says: "Do nothing."
elif opcode == 16: # jump
offset = s24()
coder.seek(coder.tell() + offset)
elif opcode == 17: # iftrue
offset = s24()
value = stack.pop()
if value:
coder.seek(coder.tell() + offset)
elif opcode == 18: # iffalse
offset = s24()
value = stack.pop()
if not value:
coder.seek(coder.tell() + offset)
elif opcode == 19: # ifeq
offset = s24()
value2 = stack.pop()
value1 = stack.pop()
if value2 == value1:
coder.seek(coder.tell() + offset)
elif opcode == 20: # ifne
offset = s24()
value2 = stack.pop()
value1 = stack.pop()
if value2 != value1:
coder.seek(coder.tell() + offset)
elif opcode == 21: # iflt
offset = s24()
value2 = stack.pop()
value1 = stack.pop()
if value1 < value2:
coder.seek(coder.tell() + offset)
elif opcode == 32: # pushnull
stack.append(None)
elif opcode == 33: # pushundefined
stack.append(undefined)
elif opcode == 36: # pushbyte
v = _read_byte(coder)
stack.append(v)
elif opcode == 37: # pushshort
v = u30()
stack.append(v)
elif opcode == 38: # pushtrue
stack.append(True)
elif opcode == 39: # pushfalse
stack.append(False)
elif opcode == 40: # pushnan
stack.append(float('NaN'))
elif opcode == 42: # dup
value = stack[-1]
stack.append(value)
elif opcode == 44: # pushstring
idx = u30()
stack.append(self.constant_strings[idx])
elif opcode == 48: # pushscope
new_scope = stack.pop()
scopes.append(new_scope)
elif opcode == 66: # construct
arg_count = u30()
args = list(reversed(
[stack.pop() for _ in range(arg_count)]))
obj = stack.pop()
res = obj.avm_class.make_object()
stack.append(res)
elif opcode == 70: # callproperty
index = u30()
mname = self.multinames[index]
arg_count = u30()
args = list(reversed(
[stack.pop() for _ in range(arg_count)]))
obj = stack.pop()
if obj == StringClass:
if mname == 'String':
assert len(args) == 1
assert isinstance(args[0], (
int, compat_str, _Undefined))
if args[0] == undefined:
res = 'undefined'
else:
res = compat_str(args[0])
stack.append(res)
continue
else:
raise NotImplementedError(
'Function String.%s is not yet implemented'
% mname)
elif isinstance(obj, _AVMClass_Object):
func = self.extract_function(obj.avm_class, mname)
res = func(args)
stack.append(res)
continue
elif isinstance(obj, _AVMClass):
func = self.extract_function(obj, mname)
res = func(args)
stack.append(res)
continue
elif isinstance(obj, _ScopeDict):
if mname in obj.avm_class.method_names:
func = self.extract_function(obj.avm_class, mname)
res = func(args)
else:
res = obj[mname]
stack.append(res)
continue
elif isinstance(obj, compat_str):
if mname == 'split':
assert len(args) == 1
assert isinstance(args[0], compat_str)
if args[0] == '':
res = list(obj)
else:
res = obj.split(args[0])
stack.append(res)
continue
elif mname == 'charCodeAt':
assert len(args) <= 1
idx = 0 if len(args) == 0 else args[0]
assert isinstance(idx, int)
res = ord(obj[idx])
stack.append(res)
continue
elif isinstance(obj, list):
if mname == 'slice':
assert len(args) == 1
assert isinstance(args[0], int)
res = obj[args[0]:]
stack.append(res)
continue
elif mname == 'join':
assert len(args) == 1
assert isinstance(args[0], compat_str)
res = args[0].join(obj)
stack.append(res)
continue
raise NotImplementedError(
'Unsupported property %r on %r'
% (mname, obj))
elif opcode == 71: # returnvoid
res = undefined
return res
elif opcode == 72: # returnvalue
res = stack.pop()
return res
elif opcode == 73: # constructsuper
# Not yet implemented, just hope it works without it
arg_count = u30()
args = list(reversed(
[stack.pop() for _ in range(arg_count)]))
obj = stack.pop()
elif opcode == 74: # constructproperty
index = u30()
arg_count = u30()
args = list(reversed(
[stack.pop() for _ in range(arg_count)]))
obj = stack.pop()
mname = self.multinames[index]
assert isinstance(obj, _AVMClass)
# We do not actually call the constructor for now;
# we just pretend it does nothing
stack.append(obj.make_object())
elif opcode == 79: # callpropvoid
index = u30()
mname = self.multinames[index]
arg_count = u30()
args = list(reversed(
[stack.pop() for _ in range(arg_count)]))
obj = stack.pop()
if isinstance(obj, _AVMClass_Object):
func = self.extract_function(obj.avm_class, mname)
res = func(args)
assert res is undefined
continue
if isinstance(obj, _ScopeDict):
assert mname in obj.avm_class.method_names
func = self.extract_function(obj.avm_class, mname)
res = func(args)
assert res is undefined
continue
if mname == 'reverse':
assert isinstance(obj, list)
obj.reverse()
else:
raise NotImplementedError(
'Unsupported (void) property %r on %r'
% (mname, obj))
elif opcode == 86: # newarray
arg_count = u30()
arr = []
for i in range(arg_count):
arr.append(stack.pop())
arr = arr[::-1]
stack.append(arr)
elif opcode == 93: # findpropstrict
index = u30()
mname = self.multinames[index]
for s in reversed(scopes):
if mname in s:
res = s
break
else:
res = scopes[0]
if mname not in res and mname in _builtin_classes:
stack.append(_builtin_classes[mname])
else:
stack.append(res[mname])
elif opcode == 94: # findproperty
index = u30()
mname = self.multinames[index]
for s in reversed(scopes):
if mname in s:
res = s
break
else:
res = avm_class.variables
stack.append(res)
elif opcode == 96: # getlex
index = u30()
mname = self.multinames[index]
for s in reversed(scopes):
if mname in s:
scope = s
break
else:
scope = avm_class.variables
if mname in scope:
res = scope[mname]
elif mname in _builtin_classes:
res = _builtin_classes[mname]
else:
# Assume uninitialized
# TODO warn here
res = undefined
stack.append(res)
elif opcode == 97: # setproperty
index = u30()
value = stack.pop()
idx = self.multinames[index]
if isinstance(idx, _Multiname):
idx = stack.pop()
obj = stack.pop()
obj[idx] = value
elif opcode == 98: # getlocal
index = u30()
stack.append(registers[index])
elif opcode == 99: # setlocal
index = u30()
value = stack.pop()
registers[index] = value
elif opcode == 102: # getproperty
index = u30()
pname = self.multinames[index]
if pname == 'length':
obj = stack.pop()
assert isinstance(obj, (compat_str, list))
stack.append(len(obj))
elif isinstance(pname, compat_str): # Member access
obj = stack.pop()
if isinstance(obj, _AVMClass):
res = obj.static_properties[pname]
stack.append(res)
continue
assert isinstance(obj, (dict, _ScopeDict)),\
'Accessing member %r on %r' % (pname, obj)
res = obj.get(pname, undefined)
stack.append(res)
else: # Assume attribute access
idx = stack.pop()
assert isinstance(idx, int)
obj = stack.pop()
assert isinstance(obj, list)
stack.append(obj[idx])
elif opcode == 104: # initproperty
index = u30()
value = stack.pop()
idx = self.multinames[index]
if isinstance(idx, _Multiname):
idx = stack.pop()
obj = stack.pop()
obj[idx] = value
                elif opcode == 115: # convert_i
value = stack.pop()
intvalue = int(value)
stack.append(intvalue)
elif opcode == 128: # coerce
u30()
elif opcode == 130: # coerce_a
value = stack.pop()
# um, yes, it's any value
stack.append(value)
elif opcode == 133: # coerce_s
assert isinstance(stack[-1], (type(None), compat_str))
elif opcode == 147: # decrement
value = stack.pop()
assert isinstance(value, int)
stack.append(value - 1)
elif opcode == 149: # typeof
value = stack.pop()
return {
_Undefined: 'undefined',
compat_str: 'String',
int: 'Number',
float: 'Number',
}[type(value)]
elif opcode == 160: # add
value2 = stack.pop()
value1 = stack.pop()
res = value1 + value2
stack.append(res)
elif opcode == 161: # subtract
value2 = stack.pop()
value1 = stack.pop()
res = value1 - value2
stack.append(res)
elif opcode == 162: # multiply
value2 = stack.pop()
value1 = stack.pop()
res = value1 * value2
stack.append(res)
elif opcode == 164: # modulo
value2 = stack.pop()
value1 = stack.pop()
res = value1 % value2
stack.append(res)
elif opcode == 168: # bitand
value2 = stack.pop()
value1 = stack.pop()
assert isinstance(value1, int)
assert isinstance(value2, int)
res = value1 & value2
stack.append(res)
elif opcode == 171: # equals
value2 = stack.pop()
value1 = stack.pop()
result = value1 == value2
stack.append(result)
elif opcode == 175: # greaterequals
value2 = stack.pop()
value1 = stack.pop()
result = value1 >= value2
stack.append(result)
elif opcode == 192: # increment_i
value = stack.pop()
assert isinstance(value, int)
stack.append(value + 1)
elif opcode == 208: # getlocal_0
stack.append(registers[0])
elif opcode == 209: # getlocal_1
stack.append(registers[1])
elif opcode == 210: # getlocal_2
stack.append(registers[2])
elif opcode == 211: # getlocal_3
stack.append(registers[3])
elif opcode == 212: # setlocal_0
registers[0] = stack.pop()
elif opcode == 213: # setlocal_1
registers[1] = stack.pop()
elif opcode == 214: # setlocal_2
registers[2] = stack.pop()
elif opcode == 215: # setlocal_3
registers[3] = stack.pop()
else:
raise NotImplementedError(
'Unsupported opcode %d' % opcode)
avm_class.method_pyfunctions[func_name] = resfunc
return resfunc
| 37.861446
| 83
| 0.453874
|
f90b90d5d1e260cbdcfcc9675af5359fbbc4cbfe
| 260
|
py
|
Python
|
gallerypics/admin.py
|
Steve99-coder/Gallery
|
6303e75b6212d1567a77ea4b054580bd9ec6b0d1
|
[
"MIT"
] | 1
|
2021-04-09T20:25:23.000Z
|
2021-04-09T20:25:23.000Z
|
gallerypics/admin.py
|
Steve99-coder/Gallery
|
6303e75b6212d1567a77ea4b054580bd9ec6b0d1
|
[
"MIT"
] | null | null | null |
gallerypics/admin.py
|
Steve99-coder/Gallery
|
6303e75b6212d1567a77ea4b054580bd9ec6b0d1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Image,Category,Location
# Register your models here.
admin.site.register(Image)
admin.site.register(Category)
admin.site.register(Location)
| 21.666667
| 43
| 0.788462
|
104ccf4b5e67ff7034ac4978b33520f62191480d
| 2,484
|
py
|
Python
|
Python/my-calendar-ii.py
|
xiaohalo/LeetCode
|
68211ba081934b21bb1968046b7e3c1459b3da2d
|
[
"MIT"
] | 9
|
2019-06-30T07:15:18.000Z
|
2022-02-10T20:13:40.000Z
|
Python/my-calendar-ii.py
|
pnandini/LeetCode
|
e746c3298be96dec8e160da9378940568ef631b1
|
[
"MIT"
] | 1
|
2018-07-10T03:28:43.000Z
|
2018-07-10T03:28:43.000Z
|
Python/my-calendar-ii.py
|
pnandini/LeetCode
|
e746c3298be96dec8e160da9378940568ef631b1
|
[
"MIT"
] | 9
|
2019-01-16T22:16:49.000Z
|
2022-02-06T17:33:41.000Z
|
# Time: O(n^2)
# Space: O(n)
# Implement a MyCalendarTwo class to store your events.
# A new event can be added if adding the event will not cause a triple booking.
#
# Your class will have one method, book(int start, int end).
# Formally, this represents a booking on the half open interval [start, end),
# the range of real numbers x such that start <= x < end.
#
# A triple booking happens when three events have some non-empty intersection
# (i.e., there is some time that is common to all 3 events.)
#
# For each call to the method MyCalendar.book,
# return true if the event can be added to the calendar successfully without causing a triple booking.
# Otherwise, return false and do not add the event to the calendar.
#
# Your class will be called like this: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)
# Example 1:
# MyCalendar();
# MyCalendar.book(10, 20); // returns true
# MyCalendar.book(50, 60); // returns true
# MyCalendar.book(10, 40); // returns true
# MyCalendar.book(5, 15); // returns false
# MyCalendar.book(5, 10); // returns true
# MyCalendar.book(25, 55); // returns true
#
# Explanation:
# The first two events can be booked. The third event can be double booked.
# The fourth event (5, 15) can't be booked, because it would result in a triple booking.
# The fifth event (5, 10) can be booked, as it does not use time 10 which is already double booked.
# The sixth event (25, 55) can be booked, as the time in [25, 40) will be double booked with the third event;
# the time [40, 50) will be single booked, and the time [50, 55) will be double booked with the second event.
#
# Note:
# - The number of calls to MyCalendar.book per test case will be at most 1000.
# - In calls to MyCalendar.book(start, end), start and end are integers in the range [0, 10^9].
class MyCalendarTwo(object):
def __init__(self):
self.__overlaps = []
self.__calendar = []
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: bool
"""
for i, j in self.__overlaps:
if start < j and end > i:
return False
for i, j in self.__calendar:
if start < j and end > i:
self.__overlaps.append((max(start, i), min(end, j)))
self.__calendar.append((start, end))
return True
# Your MyCalendarTwo object will be instantiated and called as such:
# obj = MyCalendarTwo()
# param_1 = obj.book(start,end)
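#
# Quick sanity check (added sketch, not part of the original solution) reproducing Example 1 above:
#   cal = MyCalendarTwo()
#   assert cal.book(10, 20)        # returns True
#   assert cal.book(50, 60)        # returns True
#   assert cal.book(10, 40)        # double booking is allowed
#   assert not cal.book(5, 15)     # would triple-book [10, 15)
#   assert cal.book(5, 10)
#   assert cal.book(25, 55)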
| 38.215385
| 109
| 0.671498
|
b144821562d991c3fec73087a768738898783537
| 47,872
|
py
|
Python
|
dandeliondiary/compare/views.py
|
amberdiehl/dandeliondiary_project
|
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
|
[
"FSFAP"
] | null | null | null |
dandeliondiary/compare/views.py
|
amberdiehl/dandeliondiary_project
|
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
|
[
"FSFAP"
] | 6
|
2020-04-29T23:54:15.000Z
|
2022-03-11T23:25:24.000Z
|
dandeliondiary/compare/views.py
|
amberdiehl/dandeliondiary_project
|
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
|
[
"FSFAP"
] | null | null | null |
import datetime
import operator
from decimal import *
from django.forms import fields
from django.shortcuts import redirect, render, render_to_response, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Count, Sum
from hashids import Hashids
from capture.helpers import helper_budget_categories, composite_category_name
from household.helpers import helper_get_me
from .helpers import *
from .models import MyBudgetGroup, MyBudgetCategory, MyBudget
HASH_SALT = 'nowis Ag00d tiM3for tW0BR3wskies'
HASH_MIN_LENGTH = 16 # Note that this value is still hard-coded in URLs for validation
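# Reference sketch (not in the original source): the AJAX views below obfuscate primary keys
# with these settings before exposing them to the browser, e.g.
#   hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
#   public_id = hashids.encode(group.pk)      # string of at least HASH_MIN_LENGTH characters
#   group_pk = hashids.decode(public_id)[0]   # decode() returns a tuple of the original ints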
@login_required
def compare_dashboard(request):
"""
    The dashboard landing page for compare shows the current status of budgets and expenses graphically. It
    also triggers creation of the initial budget setup when an account is first created on Dandelion.
:param request:
:return:
"""
# Get household, validate active subscription
me = helper_get_me(request.user.pk)
if me.get('redirect'):
return redirect('household:household_dashboard')
else:
# Send customized date range based on when subscriber started using Dandelion Diary
years = ()
try:
expenses = MyExpenseItem.objects.filter(household=me.get('household_key')).order_by('expense_date')[0]
except IndexError:
start_year = datetime.datetime.now().year
else:
start_year = expenses.expense_date.year
current_year = datetime.datetime.now().year
for yr in range(start_year, current_year+1):
years += yr,
category_choices = helper_budget_categories(me.get('household_key'), top_load=True,
no_selection='All categories')
category_chooser = fields.ChoiceField(
choices=category_choices
)
context = {
'page_title': 'Compare Dashboard',
'url': 'compare:compare_dashboard',
'options': get_month_options(),
'years': sorted(years, reverse=True),
'choose_category': category_chooser.widget.render('choose_category', 0)
}
return render(request, 'compare/compare_dashboard.html', context)
@login_required
def budget_and_expenses(request):
"""
    Show current (or past) budgets with their associated expenses and remaining balance.
:param request:
:return:
"""
# Get household, validate active subscription
me = helper_get_me(request.user.pk)
if me.get('redirect'):
return redirect('household:household_dashboard')
else:
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
group_tabs = []
group_keys = ''
for group in budget_groups:
group_tabs.append(group.my_group_name)
group_keys += hashids.encode(group.pk) + ','
context = {
'page_title': 'Budget + Expenses',
'url': 'compare:budgets_expenses',
'tabs': group_tabs,
'keys': group_keys,
'options': get_month_options(),
}
return render(request, 'compare/budget_expenses.html', context)
@login_required
def budget(request):
"""
Define the budget for each group/category of expenses.
:param request:
:return:
"""
# Get household, validate active subscription
me = helper_get_me(request.user.pk)
if me.get('redirect'):
return redirect('household:household_dashboard')
else:
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
group_tabs = []
group_keys = ''
for group in budget_groups:
group_tabs.append(group.my_group_name)
group_keys += hashids.encode(group.pk) + ','
context = {
'page_title': 'Budget',
'url': 'compare:budget',
'tabs': group_tabs,
'keys': group_keys,
}
return render(request, 'compare/budget.html', context)
@login_required
def groups_and_categories(request):
"""
This page enables the user to change names for template groups and categories and to add their own groups and
categories, if desired.
:param request:
:return:
"""
# Get household, validate active subscription
me = helper_get_me(request.user.pk)
if me.get('redirect'):
return redirect('household:household_dashboard')
else:
context = {
'page_title': 'Groups and Categories',
'url': 'compare:groups_categories',
}
return render(request, 'compare/groups_categories.html', context=context)
@login_required
def ajax_dashboard_snapshot(request, dt):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['status'] = 'ERROR'
return JsonResponse(response_data)
filter_date = datetime.datetime.strptime(dt, '%Y-%m-%d').date()
today = datetime.datetime.now().date()
if today.year == filter_date.year and today.month == filter_date.month:
days_remaining = filter_date.day - today.day
else:
days_remaining = 0
# setup columns for budget and expenses column chart
budget_expense_columnchart = {}
cols_budget_expense_columnchart = [
{'id': 'groups', 'label': 'Groups', 'type': 'string'},
{'id': 'budget', 'label': 'Budget', 'type': 'number'},
{'id': 'expenses', 'label': 'Expenses', 'type': 'number'}
]
rows_budget_expense_columnchart = []
total_budget = 0
total_expenses = 0
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
for group in budget_groups:
amounts = helper_get_group_budget_and_expenses(group, filter_date=filter_date, fetch_expenses=True)
total_budget += amounts['group_budget']
total_expenses += amounts['group_expenses']
row_budget_expense_columnchart = {'c': [{'v': group.my_group_name},
{'v': int(amounts['group_budget'])},
{'v': int(amounts['group_expenses'])}]}
rows_budget_expense_columnchart.append(row_budget_expense_columnchart)
budget_expense_columnchart['cols'] = cols_budget_expense_columnchart
budget_expense_columnchart['rows'] = rows_budget_expense_columnchart
response_data['status'] = 'OK'
response_data['totalBudget'] = total_budget
response_data['totalExpenses'] = total_expenses
response_data['netRemaining'] = total_budget - total_expenses
response_data['daysRemaining'] = days_remaining
response_data['budgetExpenseColumnchart'] = budget_expense_columnchart
return JsonResponse(response_data)
@login_required
def ajax_dashboard_month_series(request, from_date, to_date, category):
"""
    Gets the net difference between budget and expenses for a given series of months. Note that although the
    dates require "day" to be 01, the entire month of expenses is retrieved.
:param request:
:param from_date: Must use format 2016-01-01 where day is always set to 01
:param to_date: Must use format 2016-12-01 where day is always set to 01
:param category: If provided, s/b category key to filter results
:return:
"""
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['status'] = 'ERROR'
return JsonResponse(response_data)
f_dt = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()
t_dt = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()
today = datetime.datetime.now().date()
# setup for column chart
column_chart = {}
cols = [
{'id': 'month', 'label': 'Month', 'type': 'string'},
{'id': 'amount', 'label': 'Net Amount', 'type': 'number'},
{'type': 'string', 'role': 'style'}
]
rows = []
# Create dictionary object to store analysis data
analysis_data = {}
this_date = f_dt
while this_date <= t_dt:
month_net = 0
month_abbr = this_date.strftime('%b')
category_key = int(category)
# Select effective budget based on last day of month (period) being processed, not first day
future_date = this_date + datetime.timedelta(days=32)
full_month = future_date.replace(day=1) - datetime.timedelta(days=1)
# Get all, most current, budget records for the household
budgets = MyBudget.objects.filter(category__my_budget_group__household=me.get('household_key')) \
.filter(effective_date__year__lte=this_date.year, effective_date__lte=full_month) \
.values('category', 'category__my_category_name', 'amount', 'annual_payment_month') \
.order_by('category', '-effective_date') \
.distinct('category')
# Filter to just one category when category has been selected by user
if category_key:
budgets = budgets.filter(category=category_key)
if this_date <= today:
# Get the sum of budgets for the month; only add an annual budget amount when set for the current month
for budget in budgets:
if budget['annual_payment_month'] == 0 or budget['annual_payment_month'] == this_date.month:
month_net += budget['amount']
# Get the sum of expenses for the entire month, or for an individual category if selected by user
if not category_key:
expenses = MyExpenseItem.objects.filter(household=me.get('household_key')) \
.filter(expense_date__year=this_date.year, expense_date__month=this_date.month) \
.aggregate(Sum('amount'))
else:
expenses = MyExpenseItem.objects.filter(household=me.get('household_key')) \
.filter(expense_date__year=this_date.year, expense_date__month=this_date.month) \
.filter(category=category_key) \
.aggregate(Sum('amount'))
# If expenses, subtract to get net for month
if expenses.get('amount__sum') is None:
pass
else:
month_net -= expenses.get('amount__sum')
# Capture category level actual budget and expense data to produce analysis
analysis_data[this_date.month] = list(budgets)
for category_dict in analysis_data[this_date.month]:
category_dict['expenses'] = get_expenses_for_period(category_dict['category'], this_date, full_month)
else:
# Capture future budgets for analysis
analysis_data[this_date.month] = list(budgets)
if month_net < 0:
color = '#FA490F'
else:
color = '#8EAF17'
row = {'c': [{'v': month_abbr},
{'v': int(month_net)},
{'v': color}
]}
rows.append(row)
this_date += datetime.timedelta(days=32)
this_date = this_date.replace(day=1)
column_chart['cols'] = cols
column_chart['rows'] = rows
analysis = {
'totalBudget': 0,
'totalExpenses': 0,
'forecastVariance': 0,
'primary_neg_drivers': [],
'primary_pos_drivers': [],
'secondary_drivers': []
}
analysis_by_category = {}
if today.month >= 3: # TODO: Need to convert to at least 3 months of expenses, not assume beginning of year
analysis['show'] = True
# Calculate variances for past months
for ndx in range(1, today.month):
for category_dict in analysis_data[ndx]:
if category_dict['annual_payment_month'] in [0, ndx]:
variance = 1 - (category_dict['expenses'] / category_dict['amount'])
else:
if category_dict['expenses'] > 0: # Mistimed annual payment
variance = Decimal(-1)
else:
variance = Decimal(0) # Annual payment that has not occurred yet
if category_dict['category'] in analysis_by_category:
analysis_by_category[category_dict['category']]['variances'].append(variance)
else:
analysis_by_category[category_dict['category']] = {}
analysis_by_category[category_dict['category']]['variances'] = [variance]
analysis_by_category[category_dict['category']]['name'] = category_dict['category__my_category_name']
# Evaluate variances
for category in analysis_by_category:
category_data = analysis_by_category[category]
category_data['average_variance'] = sum(category_data['variances']) / len(category_data['variances'])
if abs(category_data['average_variance']) > abs(.05):
var_percent = float(sum(1 for x in category_data['variances'] if x < 0)) / len(category_data['variances'])
if var_percent > float(.6):
analysis['primary_neg_drivers'].append('{} is over budget {}% of the time.'
.format(category_data['name'], int(var_percent*100)))
else:
if var_percent > float(.4):
analysis['secondary_drivers'].append('{} - over'.format(category_data['name']))
var_percent = float(sum(1 for x in category_data['variances'] if x > 0)) / len(category_data['variances'])
if var_percent > float(.6):
analysis['primary_pos_drivers'].append('{} is under budget {}% of the time.'
.format(category_data['name'], int(var_percent * 100)))
else:
if var_percent > float(.4):
analysis['secondary_drivers'].append('{} - under'.format(category_data['name']))
# Forecast expenses for current and future months based on past average variances
for ndx in range(today.month, 13):
category_dict = analysis_data[ndx]
for category_data in category_dict:
avg_var = analysis_by_category[category_data['category']]['average_variance']
if avg_var < 0:
estimated_expenses = ((1 + abs(avg_var)) * category_data['amount'])
else:
estimated_expenses = (1 - avg_var) * category_data['amount']
if category_data['annual_payment_month'] in [0, ndx]:
if ndx == today.month:
if estimated_expenses > category_data['expenses']:
category_data['actual_expenses'] = category_data['expenses']
category_data['expenses'] = estimated_expenses
else:
category_data['actual_expenses'] = 0
category_data['expenses'] = estimated_expenses
# Calculate overall projected spending
for ndx in range(1, 13):
category_dict = analysis_data[ndx]
for category_data in category_dict:
if category_data['annual_payment_month'] in [0, ndx]:
analysis['totalBudget'] += category_data['amount']
analysis['totalExpenses'] += category_data.get('expenses', 0)
analysis['totalExpenses'] = int(analysis['totalExpenses'])
analysis['forecastVariance'] = analysis['totalBudget'] - analysis['totalExpenses']
else:
analysis['show'] = False
response_data['status'] = 'OK'
response_data['monthSeries'] = column_chart
response_data['analysis'] = analysis
return JsonResponse(response_data)
@login_required
def ajax_dashboard_budget_drivers(request, from_date, to_date):
"""
    Gets the top (or bottom, TBD) positive and negative budget drivers for the given year or the rolling past
    12 months. Note that although the dates require "day" to be 01, the entire month of expenses is retrieved.
:param request:
:param from_date: Must use format 2016-01-01 where day is always set to 01
:param to_date: Must use format 2016-12-01 where day is always set to 01
:return:
"""
# TODO: Add auto-rolling feature.
# TODO: Add total for positive and negative drivers to provide context or even another graph.
# TODO: Add analysis; e.g. how frequently item was over/under budget. Make recommendation regarding new budget amt.
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['status'] = 'ERROR'
return JsonResponse(response_data)
f_dt = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()
t_dt = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()
today = datetime.datetime.now().date()
# setup for column chart - positive
column_chart_1 = {}
cols_1 = [
{'id': 'category', 'label': 'Category', 'type': 'string'},
{'id': 'amount', 'label': 'Net Amount', 'type': 'number'},
{'type': 'string', 'role': 'style'}
]
rows_1 = []
# setup for column chart - negative
column_chart_2 = {}
cols_2 = [
{'id': 'category', 'label': 'Category', 'type': 'string'},
{'id': 'amount', 'label': 'Net Amount', 'type': 'number'},
{'type': 'string', 'role': 'style'}
]
rows_2 = []
# Create dictionary object to store analysis data
analysis_data = {}
this_date = f_dt
while this_date <= t_dt:
# Select effective budget based on last day of month (period) being processed, not first day
future_date = this_date + datetime.timedelta(days=32)
full_month = future_date.replace(day=1) - datetime.timedelta(days=1)
# Don't process future month(s) because they'll skew the results; include a month if most of it has
# transpired.
if today < full_month:
if full_month - today > datetime.timedelta(days=5):
break
# Get all, most current, budget records for the household
budgets = MyBudget.objects.filter(category__my_budget_group__household=me.get('household_key')) \
.filter(effective_date__year__lte=this_date.year, effective_date__lte=full_month) \
.values('pk', 'category', 'category__my_category_name', 'category__parent_category', 'amount',
'annual_payment_month') \
.order_by('category', '-effective_date') \
.distinct('category')
# For each budget, get net result for the month
for budget in budgets:
month_net = 0
if budget['annual_payment_month'] == 0 or budget['annual_payment_month'] == this_date.month:
month_net += budget['amount']
expenses = MyExpenseItem.objects.filter(household=me.get('household_key')) \
.filter(expense_date__year=this_date.year, expense_date__month=this_date.month) \
.filter(category=budget['category']) \
.aggregate(Sum('amount'))
if expenses.get('amount__sum') is None:
pass
else:
month_net -= expenses.get('amount__sum')
if analysis_data.get(budget['category'], None):
analysis_data[budget['category']] += month_net
else:
analysis_data[budget['category']] = month_net
this_date += datetime.timedelta(days=32)
this_date = this_date.replace(day=1)
# Sort dictionary by amounts, storing in a list of tuples.
sorted_categories = sorted(analysis_data.items(), key=operator.itemgetter(1))
# Store top 5 negative drivers (net amount, across all months for a given category being negative)
count, color = 1, '#FA490F'
for item in sorted_categories:
if item[1] < 0:
cat = MyBudgetCategory.objects.get(pk=item[0])
row = {'c': [{'v': composite_category_name(cat.my_category_name, cat.parent_category, cat.my_budget_group)},
{'v': int(item[1])},
{'v': color}
]}
rows_1.append(row)
else:
break
if count == 5:
break
count += 1
column_chart_1['cols'] = cols_1
column_chart_1['rows'] = rows_1
# Store top 5 positive drivers (net amount, across all months for a given category being positive)
color = '#8EAF17'
for item in sorted_categories[len(sorted_categories)-5:]:
if item[1] >= 0:
cat = MyBudgetCategory.objects.get(pk=item[0])
row = {'c': [{'v': composite_category_name(cat.my_category_name, cat.parent_category, cat.my_budget_group)},
{'v': int(item[1])},
{'v': color}
]}
rows_2.append(row)
column_chart_2['cols'] = cols_2
column_chart_2['rows'] = rows_2
response_data['status'] = 'OK'
response_data['positiveDrivers'] = column_chart_2
response_data['negativeDrivers'] = column_chart_1
return JsonResponse(response_data)
@login_required
def ajax_dashboard_budget(request, dt):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['status'] = 'ERROR'
return JsonResponse(response_data)
filter_date = datetime.datetime.strptime(dt, '%Y-%m-%d').date()
# setup columns for budget pie chart
budget_piechart = {}
cols_budget_piechart = [
{'id': 'groups', 'label': 'Groups', 'type': 'string'},
{'id': 'amount', 'label': 'Amount', 'type': 'number'}
]
rows_budget_piechart = []
total_budget = 0
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
for group in budget_groups:
amounts = helper_get_group_budget_and_expenses(group, filter_date=filter_date, fetch_expenses=False)
total_budget += amounts['group_budget']
row_budget_piechart = {'c': [{'v': group.my_group_name},
{'v': int(amounts['group_budget'])}]}
rows_budget_piechart.append(row_budget_piechart)
budget_piechart['cols'] = cols_budget_piechart
budget_piechart['rows'] = rows_budget_piechart
response_data['status'] = 'OK'
response_data['totalBudget'] = total_budget
response_data['budgetPiechart'] = budget_piechart
return JsonResponse(response_data)
@login_required
def ajax_be_groups(request, dt):
"""
Budgets + Expenses: Show current or past budget and expense information
:param request:
:param dt:
:return:
"""
response_data = {}
# Get household, validate active subscription
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
else:
filter_date = datetime.datetime.strptime(dt, '%Y-%m-%d').date()
data = []
budget_total = 0
expense_total = 0
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
for group in budget_groups:
record = {}
record['group'] = group.my_group_name
amounts = helper_get_group_budget_and_expenses(group, filter_date=filter_date)
record['budget'] = amounts['group_budget']
record['expense'] = amounts['group_expenses']
record['balance'] = amounts['group_budget'] - amounts['group_expenses']
data.append(record)
budget_total += amounts['group_budget']
expense_total += amounts['group_expenses']
record = {}
record['group'] = '<b>** Total</b>'
record['budget'] = '<b>{}</b>'.format(budget_total)
record['expense'] = '<b>{}</b>'.format(expense_total)
record['balance'] = '<b>{}</b>'.format(budget_total-expense_total)
data.append(record)
response_data['Result'] = 'OK'
response_data['Records'] = data
return JsonResponse(response_data)
@login_required
def ajax_be_categories(request, pid, dt):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
else:
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
this = hashids.decode(pid)[0]
filter_date = datetime.datetime.strptime(dt, '%Y-%m-%d').date()
data = []
budget_total = 0
expense_total = 0
budget_categories = MyBudgetCategory.objects.filter(my_budget_group=this).filter(parent_category=None)\
.order_by('my_category_name')
for category in budget_categories:
record = {}
record['my_category_name'] = category.my_category_name
amounts = helper_get_category_budget_and_expenses(category, filter_date=filter_date, fetch_expenses=True)
record['budget'] = amounts['budget']
record['expense'] = amounts['expenses']
record['balance'] = amounts['budget'] - amounts['expenses']
data.append(record)
budget_total += amounts['budget']
expense_total += amounts['expenses']
record = {}
record['my_category_name'] = '<b>** Total</b>'
record['budget'] = '<b>{}</b>'.format(budget_total)
record['expense'] = '<b>{}</b>'.format(expense_total)
record['balance'] = '<b>{}</b>'.format(budget_total-expense_total)
data.append(record)
response_data['Result'] = 'OK'
response_data['Records'] = data
return JsonResponse(response_data)
@login_required
def ajax_list_groups(request):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
else:
data = []
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
for group in budget_groups:
record = {}
record['id'] = hashids.encode(group.pk)
record['my_group_name'] = group.my_group_name
record['group_description'] = group.group_description
record['group_list_order'] = group.group_list_order
record['core'] = 'no'
if group.group_perma_key:
record['core'] = 'yes'
data.append(record)
response_data['Result'] = 'OK'
response_data['Records'] = data
return JsonResponse(response_data)
@login_required
def ajax_create_group(request):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_group_inputs(request.POST.get('my_group_name'),
request.POST.get('group_description'),
request.POST.get('group_list_order')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid group name, description and/or list order given.'
return JsonResponse(response_data)
gr_name = request.POST.get('my_group_name')
gr_description = request.POST.get('group_description')
gr_list_order = request.POST.get('group_list_order')
new_group = MyBudgetGroup()
new_group.household = me.get('household_obj')
new_group.my_group_name = gr_name
new_group.group_description = gr_description
new_group.group_list_order = gr_list_order
new_group.save()
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
record = {}
record['id'] = hashids.encode(new_group.pk)
record['my_group_name'] = new_group.my_group_name
record['group_description'] = new_group.group_description
record['group_list_order'] = new_group.group_list_order
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_update_group(request):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_id_input(request.POST.get('id')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
if not validate_group_inputs(request.POST.get('my_group_name'),
request.POST.get('group_description'),
request.POST.get('group_list_order')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid group name, description and/or list order given.'
return JsonResponse(response_data)
# Get budget group for received ID and validate association with logged in user household
id_hashed = request.POST.get('id')
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
this = hashids.decode(id_hashed)[0]
try:
budget_group = MyBudgetGroup.objects.get(pk=this)
except ObjectDoesNotExist:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Error getting budget group.'
return JsonResponse(response_data)
if not budget_group.household.pk == me.get('household_key'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request for budget group.'
return JsonResponse(response_data)
record = {}
if not budget_group.my_group_name == request.POST.get('my_group_name'):
budget_group.my_group_name = request.POST.get('my_group_name')
record['my_group_name'] = request.POST.get('my_group_name')
if not budget_group.group_description == request.POST.get('group_description'):
budget_group.group_description = request.POST.get('group_description')
record['group_description'] = request.POST.get('group_description')
if not budget_group.group_list_order == request.POST.get('group_list_order'):
budget_group.group_list_order = request.POST.get('group_list_order')
record['group_list_order'] = request.POST.get('group_list_order')
budget_group.save()
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_delete_group(request):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_id_input(request.POST.get('id')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
id_hashed = request.POST.get('id')
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
this = hashids.decode(id_hashed)[0]
try:
budget_group = MyBudgetGroup.objects.get(pk=this)
except ObjectDoesNotExist:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Error getting budget group.'
return JsonResponse(response_data)
if not budget_group.household.pk == me.get('household_key'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request for budget group.'
return JsonResponse(response_data)
if budget_group.group_perma_key:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Sorry, this is a core budget group used for comparisons and cannot be deleted.'
return JsonResponse(response_data)
budget_group.delete()
response_data['Result'] = 'OK'
return JsonResponse(response_data)
@login_required
def ajax_list_categories(request, s, pid):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
else:
# TODO: Needs more validation.
data = []
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
this = hashids.decode(pid)[0]
# Get parent level categories for flags 'p'arent and 'h'ybrid
if s == 'p' or s == 'h':
budget_categories = MyBudgetCategory.objects.filter(my_budget_group=this).filter(parent_category=None)\
.order_by('my_category_name')
else:
# Fetch 'c'hildren categories
budget_categories = MyBudgetCategory.objects.filter(parent_category=this).order_by('my_category_name')
for category in budget_categories:
            # When a hybrid is requested, mash parent and child categories together, returning the child's key
children = None
if s == 'h':
children = MyBudgetCategory.objects.filter(parent_category=category.pk).order_by('my_category_name')
if children:
for child in children:
record = {}
record['id'] = hashids.encode(child.pk)
record['my_category_name'] = category.my_category_name + ' > ' + child.my_category_name
record['core'] = 'no'
if child.category_perma_key:
record['core'] = 'yes'
data.append(record)
else:
record = {}
record['id'] = hashids.encode(category.pk)
record['my_category_name'] = category.my_category_name
record['core'] = 'no'
if category.category_perma_key:
record['core'] = 'yes'
data.append(record)
response_data['Result'] = 'OK'
response_data['Records'] = data
return JsonResponse(response_data)
@login_required
def ajax_create_category(request, pid):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_category_name_input(request.POST.get('my_category_name')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
this = hashids.decode(pid)[0] # category's group pk
cat_name = request.POST.get('my_category_name')
# TODO: don't allow duplicate or similar categories
group_obj = MyBudgetGroup.objects.get(pk=this)
new_category = MyBudgetCategory()
new_category.my_budget_group = group_obj
new_category.my_category_name = cat_name
new_category.save()
record = {}
record['id'] = hashids.encode(new_category.pk)
record['my_category_name'] = new_category.my_category_name
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_update_category(request):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_id_input(request.POST.get('id')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
if not validate_category_name_input(request.POST.get('my_category_name')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
id_hashed = request.POST.get('id') # category pk
this = hashids.decode(id_hashed)[0]
try:
budget_category = MyBudgetCategory.objects.get(pk=this)
except ObjectDoesNotExist:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Error getting budget category.'
return JsonResponse(response_data)
budget_category.my_category_name = request.POST.get('my_category_name')
budget_category.save()
record = {}
record['my_category_name'] = request.POST.get('my_category_name')
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_delete_category(request):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_id_input(request.POST.get('id')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
id_hashed = request.POST.get('id') # category pk
this = hashids.decode(id_hashed)[0]
try:
budget_category = MyBudgetCategory.objects.get(pk=this)
except ObjectDoesNotExist:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Error getting budget category.'
return JsonResponse(response_data)
if budget_category.category_perma_key:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Sorry, this is a core budget category used for comparisons and cannot be deleted.'
return JsonResponse(response_data)
budget_category.delete()
response_data['Result'] = 'OK'
return JsonResponse(response_data)
@login_required
def ajax_create_child_category(request, pid):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_category_name_input(request.POST.get('my_category_name')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
this = hashids.decode(pid)[0] # category's parent pk
cat_name = request.POST.get('my_category_name')
# TODO: don't allow duplicate or similar categories; assuming use of google places will help
parent = MyBudgetCategory.objects.get(pk=this)
new_category = MyBudgetCategory()
new_category.my_budget_group = parent.my_budget_group
new_category.my_category_name = cat_name
new_category.parent_category = parent
new_category.save()
record = {}
record['id'] = hashids.encode(new_category.pk)
record['my_category_name'] = new_category.my_category_name
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_list_budgets(request, pid):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
else:
data = []
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
budget_records = MyBudget.objects.filter(category=hashids.decode(pid)[0]).order_by('-effective_date')
for budget in budget_records:
record = {}
record['id'] = hashids.encode(budget.pk)
record['amount'] = budget.amount
record['annual_payment_month'] = budget.annual_payment_month
record['note'] = budget.note
record['effective_date'] = budget.effective_date
data.append(record)
response_data['Result'] = 'OK'
response_data['Records'] = data
return JsonResponse(response_data)
@login_required
def ajax_create_budget(request, pid):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_budget_inputs(request.POST.get('amount'),
request.POST.get('annual_payment_month'),
request.POST.get('note'),
request.POST.get('effective_date')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid budget amount, annual month, note and/or effective date given.'
return JsonResponse(response_data)
# TODO: Capture invalid date; e.g. 2016-01-45
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
category_obj = MyBudgetCategory.objects.get(pk=hashids.decode(pid)[0])
new_budget = MyBudget()
new_budget.category = category_obj
new_budget.amount = request.POST.get('amount')
new_budget.annual_payment_month = request.POST.get('annual_payment_month')
new_budget.note = request.POST.get('note')
new_budget.effective_date = request.POST.get('effective_date')
new_budget.save()
record = {}
record['id'] = hashids.encode(new_budget.pk)
record['amount'] = new_budget.amount
record['annual_payment_month'] = new_budget.annual_payment_month
record['note'] = new_budget.note
record['effective_date'] = new_budget.effective_date
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_change_budget(request, s):
response_data = {}
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
# Validate content type of data submitted before continuing
if not validate_id_input(request.POST.get('id')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
return JsonResponse(response_data)
if s == 'u':
if not validate_budget_inputs(request.POST.get('amount'),
request.POST.get('annual_payment_month'),
request.POST.get('note'),
request.POST.get('effective_date')):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid budget amount, annual month, note and/or effective date given.'
return JsonResponse(response_data)
hashids = Hashids(salt=HASH_SALT, min_length=HASH_MIN_LENGTH)
try:
budget = MyBudget.objects.get(pk=hashids.decode(request.POST.get('id'))[0])
except ObjectDoesNotExist:
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Error getting budget.'
return JsonResponse(response_data)
if s == 'd':
budget.delete()
response_data['Result'] = 'OK'
else:
budget.amount = request.POST.get('amount')
budget.annual_payment_month = request.POST.get('annual_payment_month')
budget.note = request.POST.get('note')
budget.effective_date = datetime.datetime.strptime(request.POST.get('effective_date'), '%Y-%m-%d')
budget.save()
# Return only changed values since jTable supports it
record = {}
if budget.amount != request.POST.get('amount'):
record['amount'] = request.POST.get('amount')
if budget.annual_payment_month != request.POST.get('annual_payment_month'):
record['annual_payment_month'] = request.POST.get('annual_payment_month')
if budget.note != request.POST.get('note'):
record['note'] = request.POST.get('note')
if budget.effective_date != request.POST.get('effective_date'):
record['effective_date'] = request.POST.get('effective_date')
response_data['Result'] = 'OK'
response_data['Record'] = record
return JsonResponse(response_data)
@login_required
def ajax_budget_summary(request):
"""
    Create a summary budget structure with amounts for display to a Dandelion Diary subscriber.
    This is currently AJAX so that amounts can be updated dynamically.
Structure is:
Field1 - Displays hierarchy structure of group, category, child-category
Field2 - Amount for child category
Field3 - Amount (or sum) for category
Field4 - Sum for group, total budget
Field5 - Effective date for category budgets
:param request:
:return:
"""
response_data = {}
# Get household, validate active subscription
me = helper_get_me(request.user.pk)
if me.get('redirect'):
response_data['Result'] = 'ERROR'
response_data['Message'] = 'Invalid request.'
else:
data = []
template = {'field1': '', 'field2': '', 'field3': '', 'field4': '', 'field5': ''}
indent = ' '
budget_groups = MyBudgetGroup.objects.filter(household=me.get('household_key')).order_by('group_list_order')
budget_total = 0
for group in budget_groups:
record = template.copy()
record['field1'] = group.my_group_name
data.append(record)
group_total = 0
categories = MyBudgetCategory.objects.filter(my_budget_group=group).filter(parent_category=None)\
.order_by('my_category_name')
for category in categories:
category_budget = helper_get_category_budget_and_expenses(category, convert_annual=True)['budget']
group_total += category_budget
record = template.copy()
record['field1'] = indent + category.my_category_name
record['field2'] = category_budget
data.append(record)
budget_total += group_total
record = template.copy()
record['field1'] = 'Total for ' + group.my_group_name
record['field3'] = group_total
data.append(record)
record = template.copy()
record['field1'] = '<b>** Total monthly budget</b>'
record['field3'] = '<b>' + str(budget_total) + '</b>'
data.append(record)
response_data['Result'] = 'OK'
response_data['Records'] = data
return JsonResponse(response_data)
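# --- Illustrative sketch (not part of the original views) ---
# Every view above hides integer primary keys behind hashids before they
# reach the browser.  A minimal round trip, using hypothetical salt and
# length values in place of the project's HASH_SALT / HASH_MIN_LENGTH:
def _hashids_round_trip_example(pk=42):
    demo = Hashids(salt='example-salt', min_length=8)  # hypothetical settings
    encoded = demo.encode(pk)           # opaque string sent to the client
    decoded = demo.decode(encoded)[0]   # views read element [0] of the tuple
    assert decoded == pk
    return encoded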
| 36.655436
| 122
| 0.635591
|
843f42b1b06cad50816d06ec5cb37be4c2e1adaa
| 111
|
py
|
Python
|
turf/helpers/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 11
|
2020-08-26T11:04:55.000Z
|
2022-01-26T14:53:10.000Z
|
turf/helpers/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 36
|
2020-04-09T16:49:05.000Z
|
2020-06-01T14:39:37.000Z
|
turf/helpers/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 5
|
2020-07-30T23:37:35.000Z
|
2021-08-24T08:10:28.000Z
|
from turf.helpers._conversions import *
from turf.helpers._features import *
from turf.helpers._units import *
| 27.75
| 39
| 0.810811
|
aab75f702d4eb403c5051841e10871822e9e8ea8
| 12,337
|
py
|
Python
|
homeassistant/components/media_player/demo.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | 2
|
2017-02-25T00:27:06.000Z
|
2017-02-25T03:09:30.000Z
|
homeassistant/components/media_player/demo.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | 1
|
2017-03-10T22:17:06.000Z
|
2017-03-10T22:17:06.000Z
|
homeassistant/components/media_player/demo.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | 2
|
2018-06-03T11:14:44.000Z
|
2018-11-04T18:18:12.000Z
|
"""
Demo implementation of the media player.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, SUPPORT_CLEAR_PLAYLIST, SUPPORT_PLAY,
MediaPlayerDevice)
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
import homeassistant.util.dt as dt_util
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the media player demo platform."""
add_devices([
DemoYoutubePlayer(
'Living Room', 'eyU3bRy2x44',
'♥♥ The Best Fireplace Video (3 hours)', 300),
DemoYoutubePlayer('Bedroom', 'kxopViU98Xo', 'Epic sax guy 10 hours',
360000),
DemoMusicPlayer(), DemoTVShowPlayer(),
])
YOUTUBE_COVER_URL_FORMAT = 'https://img.youtube.com/vi/{}/hqdefault.jpg'
YOUTUBE_PLAYER_SUPPORT = \
SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | SUPPORT_PLAY
MUSIC_PLAYER_SUPPORT = \
SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_CLEAR_PLAYLIST | SUPPORT_PLAY
NETFLIX_PLAYER_SUPPORT = \
SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
class AbstractDemoPlayer(MediaPlayerDevice):
"""A demo media players."""
# We only implement the methods that we support
def __init__(self, name):
"""Initialize the demo device."""
self._name = name
self._player_state = STATE_PLAYING
self._volume_level = 1.0
self._volume_muted = False
@property
def should_poll(self):
"""Push an update after each command."""
return False
@property
def name(self):
"""Return the name of the media player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
return self._player_state
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
return self._volume_muted
def turn_on(self):
"""Turn the media player on."""
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def turn_off(self):
"""Turn the media player off."""
self._player_state = STATE_OFF
self.schedule_update_ha_state()
def mute_volume(self, mute):
"""Mute the volume."""
self._volume_muted = mute
self.schedule_update_ha_state()
def set_volume_level(self, volume):
"""Set the volume level, range 0..1."""
self._volume_level = volume
self.schedule_update_ha_state()
def media_play(self):
"""Send play command."""
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._player_state = STATE_PAUSED
self.schedule_update_ha_state()
class DemoYoutubePlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
def __init__(self, name, youtube_id=None, media_title=None, duration=360):
"""Initialize the demo device."""
super().__init__(name)
self.youtube_id = youtube_id
self._media_title = media_title
self._duration = duration
self._progress = int(duration * .15)
self._progress_updated_at = dt_util.utcnow()
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.youtube_id
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_VIDEO
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._duration
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return YOUTUBE_COVER_URL_FORMAT.format(self.youtube_id)
@property
def media_title(self):
"""Return the title of current playing media."""
return self._media_title
@property
def app_name(self):
"""Return the current running application."""
return "YouTube"
@property
def supported_features(self):
"""Flag media player features that are supported."""
return YOUTUBE_PLAYER_SUPPORT
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._progress is None:
return None
position = self._progress
if self._player_state == STATE_PLAYING:
position += (dt_util.utcnow() -
self._progress_updated_at).total_seconds()
return position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self._player_state == STATE_PLAYING:
return self._progress_updated_at
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
self.youtube_id = media_id
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._progress = self.media_position
self._progress_updated_at = dt_util.utcnow()
super().media_pause()
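# --- Illustrative sketch (not part of the original component) ---
# DemoYoutubePlayer never runs a timer; it keeps the progress recorded at
# the last state change plus a timestamp, and derives the live position on
# demand.  The same bookkeeping in isolation (hypothetical helper):
def _derive_position_example(progress, updated_at, playing=True):
    position = progress
    if playing:
        # add the wall-clock time elapsed since the stored progress
        position += (dt_util.utcnow() - updated_at).total_seconds()
    return position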
class DemoMusicPlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
tracks = [
('Technohead', 'I Wanna Be A Hippy (Flamman & Abraxas Radio Mix)'),
('Paul Elstak', 'Luv U More'),
('Dune', 'Hardcore Vibes'),
('Nakatomi', 'Children Of The Night'),
('Party Animals',
'Have You Ever Been Mellow? (Flamman & Abraxas Radio Mix)'),
('Rob G.*', 'Ecstasy, You Got What I Need'),
('Lipstick', "I'm A Raver"),
('4 Tune Fairytales', 'My Little Fantasy (Radio Edit)'),
('Prophet', "The Big Boys Don't Cry"),
('Lovechild', 'All Out Of Love (DJ Weirdo & Sim Remix)'),
('Stingray & Sonic Driver', 'Cold As Ice (El Bruto Remix)'),
('Highlander', 'Hold Me Now (Bass-D & King Matthew Remix)'),
('Juggernaut', 'Ruffneck Rules Da Artcore Scene (12" Edit)'),
('Diss Reaction', 'Jiiieehaaaa '),
('Flamman And Abraxas', 'Good To Go (Radio Mix)'),
('Critical Mass', 'Dancing Together'),
('Charly Lownoise & Mental Theo',
'Ultimate Sex Track (Bass-D & King Matthew Remix)'),
]
def __init__(self):
"""Initialize the demo device."""
super().__init__('Walkman')
self._cur_track = 0
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return 'bounzz-1'
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return 213
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return 'https://graph.facebook.com/v2.5/107771475912710/' \
'picture?type=large'
@property
def media_title(self):
"""Return the title of current playing media."""
return self.tracks[self._cur_track][1] if len(self.tracks) > 0 else ""
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self.tracks[self._cur_track][0] if len(self.tracks) > 0 else ""
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
# pylint: disable=no-self-use
return "Bounzz"
@property
def media_track(self):
"""Return the track number of current media (Music track only)."""
return self._cur_track + 1
@property
def supported_features(self):
"""Flag media player features that are supported."""
support = MUSIC_PLAYER_SUPPORT
if self._cur_track > 0:
support |= SUPPORT_PREVIOUS_TRACK
if self._cur_track < len(self.tracks) - 1:
support |= SUPPORT_NEXT_TRACK
return support
def media_previous_track(self):
"""Send previous track command."""
if self._cur_track > 0:
self._cur_track -= 1
self.schedule_update_ha_state()
def media_next_track(self):
"""Send next track command."""
if self._cur_track < len(self.tracks) - 1:
self._cur_track += 1
self.schedule_update_ha_state()
def clear_playlist(self):
"""Clear players playlist."""
self.tracks = []
self._cur_track = 0
self._player_state = STATE_OFF
self.schedule_update_ha_state()
class DemoTVShowPlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
def __init__(self):
"""Initialize the demo device."""
super().__init__('Lounge room')
self._cur_episode = 1
self._episode_count = 13
self._source = 'dvd'
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return 'house-of-cards-1'
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return 3600
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return 'https://graph.facebook.com/v2.5/HouseofCards/picture?width=400'
@property
def media_title(self):
"""Return the title of current playing media."""
return 'Chapter {}'.format(self._cur_episode)
@property
def media_series_title(self):
"""Return the series title of current playing media (TV Show only)."""
return 'House of Cards'
@property
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return 1
@property
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self._cur_episode
@property
def app_name(self):
"""Return the current running application."""
return "Netflix"
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def supported_features(self):
"""Flag media player features that are supported."""
support = NETFLIX_PLAYER_SUPPORT
if self._cur_episode > 1:
support |= SUPPORT_PREVIOUS_TRACK
if self._cur_episode < self._episode_count:
support |= SUPPORT_NEXT_TRACK
return support
def media_previous_track(self):
"""Send previous track command."""
if self._cur_episode > 1:
self._cur_episode -= 1
self.schedule_update_ha_state()
def media_next_track(self):
"""Send next track command."""
if self._cur_episode < self._episode_count:
self._cur_episode += 1
self.schedule_update_ha_state()
def select_source(self, source):
"""Set the input source."""
self._source = source
self.schedule_update_ha_state()
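# --- Illustrative sketch (not part of the original component) ---
# The SUPPORT_* constants are bit flags: feature sets are composed with |
# (as in the *_PLAYER_SUPPORT constants above) and queried with &.
# A hypothetical check against any of the players defined here:
def _supports_next_track_example(player):
    return bool(player.supported_features & SUPPORT_NEXT_TRACK)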
| 31.232911
| 79
| 0.638243
|
f16f1005b70ca07b78965cc13a23505f947bfe76
| 1,710
|
py
|
Python
|
tests/matrix_max_pool_negative/test_matrix_max_pool_negative_int32_ksize2_stride2.py
|
RyusukeYamano/nngen
|
9ed1f7fb83908794aa94d70287d89545d45fe875
|
[
"Apache-2.0"
] | 207
|
2019-11-12T11:42:25.000Z
|
2022-03-20T20:32:17.000Z
|
tests/matrix_max_pool_negative/test_matrix_max_pool_negative_int32_ksize2_stride2.py
|
RyusukeYamano/nngen
|
9ed1f7fb83908794aa94d70287d89545d45fe875
|
[
"Apache-2.0"
] | 31
|
2019-11-25T07:33:30.000Z
|
2022-03-17T12:34:34.000Z
|
tests/matrix_max_pool_negative/test_matrix_max_pool_negative_int32_ksize2_stride2.py
|
RyusukeYamano/nngen
|
9ed1f7fb83908794aa94d70287d89545d45fe875
|
[
"Apache-2.0"
] | 29
|
2019-11-07T02:25:48.000Z
|
2022-03-12T16:22:57.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
import veriloggen
import matrix_max_pool_negative
act_shape = (1, 7, 7, 15)
act_dtype = ng.int32
out_dtype = ng.int32
ksize = (1, 2, 2, 1)
stride = (1, 2, 2, 1)
par = 1
value_ram_size = None
out_ram_size = None
axi_datawidth = 32
def test(request, silent=True):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = matrix_max_pool_negative.run(act_shape,
act_dtype, out_dtype,
ksize, stride,
par, value_ram_size, out_ram_size,
axi_datawidth, silent,
filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
if __name__ == '__main__':
rslt = matrix_max_pool_negative.run(act_shape,
act_dtype, out_dtype,
ksize, stride,
par, value_ram_size, out_ram_size,
axi_datawidth, silent=False,
filename='tmp.v',
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
print(rslt)
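# --- Illustrative note (not part of the original test file) ---
# test() reads a custom '--sim' option, so the suite is assumed to register
# it in a conftest.py roughly like this (the default shown is hypothetical):
#
#   def pytest_addoption(parser):
#       parser.addoption('--sim', action='store', default=None,
#                        help='simulator backend passed to veriloggen')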
| 31.666667
| 108
| 0.531579
|
72b065a98a8833477315344bdc630311f0ec5236
| 1,472
|
py
|
Python
|
ddtn/transformers/construct_localization_net.py
|
DukeGonzo/ddtn
|
156cf5fb2f2e46619c0243a5accfddbe3567f109
|
[
"MIT"
] | 51
|
2018-03-25T07:18:21.000Z
|
2022-02-11T12:05:52.000Z
|
ddtn/transformers/construct_localization_net.py
|
DukeGonzo/ddtn
|
156cf5fb2f2e46619c0243a5accfddbe3567f109
|
[
"MIT"
] | 2
|
2018-10-26T06:43:44.000Z
|
2018-12-20T02:05:31.000Z
|
ddtn/transformers/construct_localization_net.py
|
DukeGonzo/ddtn
|
156cf5fb2f2e46619c0243a5accfddbe3567f109
|
[
"MIT"
] | 7
|
2018-04-11T20:34:27.000Z
|
2021-07-19T17:57:40.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 18 10:01:13 2018
@author: nsde
"""
#%% Packages
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense, Flatten
from tensorflow.python.keras.layers import Conv2D, MaxPool2D
#from ddtn.transformers.transformer_util import get_transformer_init_weights
from ddtn.transformers.transformer_util import get_transformer_dim
#%%
def get_loc_net(input_shape, transformer_name = 'affine'):
""" Example on how a localization layer can look like """
# Get dimension for the last layer
dim = get_transformer_dim(transformer_name)
# TODO: find out why the zero weights destroy the affine_diffeo and CPAB
    # Get weights for identity transformer. Note: 50 = #units in the second-last layer
# weights = get_transformer_init_weights(50, transformer_name)
# Construct localization net
locnet = Sequential()
locnet.add(Conv2D(16, (3,3), activation='tanh', input_shape=input_shape))
locnet.add(MaxPool2D(pool_size=(2,2)))
locnet.add(Conv2D(32, (3,3), activation='tanh'))
locnet.add(MaxPool2D(pool_size=(2,2)))
locnet.add(Conv2D(32, (3,3), activation='tanh'))
locnet.add(MaxPool2D(pool_size=(2,2)))
locnet.add(Flatten())
locnet.add(Dense(50, activation='tanh'))
locnet.add(Dense(dim, activation='tanh'))
return locnet
#%%
if __name__ == "__main__":
loc_net = get_loc_net((250, 250, 1), transformer_name='affine')
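    # Illustrative check (not in the original): under the assumption that
    # get_transformer_dim('affine') is 6, a single 250x250x1 input should
    # yield a (1, 6) parameter vector from the localization net.
    import numpy as np  # local import, used only for this sketch
    params = loc_net.predict(np.zeros((1, 250, 250, 1)))
    print(params.shape)  # expected: (1, 6) under the affine assumption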
| 35.902439
| 78
| 0.715353
|
1fd0fb4dd2db013d543f097f1f61e2ce2b6b1812
| 2,117
|
py
|
Python
|
simulation/models/diet_models.py
|
BenLatham/Agricultural-Simulation
|
23ae2c6ae2ae6d0b1ca318c6e4543ccbe40a0039
|
[
"MIT"
] | null | null | null |
simulation/models/diet_models.py
|
BenLatham/Agricultural-Simulation
|
23ae2c6ae2ae6d0b1ca318c6e4543ccbe40a0039
|
[
"MIT"
] | 5
|
2016-07-08T12:05:31.000Z
|
2016-07-08T17:31:23.000Z
|
simulation/models/diet_models.py
|
BenLatham/FLOSS-Agricultural-Simulation
|
23ae2c6ae2ae6d0b1ca318c6e4543ccbe40a0039
|
[
"MIT"
] | null | null | null |
from django.db import models
from .general_models import ScenarioSpecificBase
from django.forms.models import model_to_dict
class Feed(ScenarioSpecificBase):
me = models.FloatField()
fme = models.FloatField()
erdp = models.FloatField()
dup = models.FloatField()
adf = models.FloatField()
price = models.FloatField(null=True)
avaliable = models.FloatField(null=True)
feed_type = models.ForeignKey('FeedType')
maxInclusion = models.FloatField(null=True)
@property
def qm_ruminant(self):
"""
Yan+agnew 2004
:return: ratio of metabolisable energy to gross energy
"""
return (-0.000796 * self.adf) + 0.827
aggregable = ["me", "fme", "erdp", "dup", "adf", "price", "qm_ruminant"] # fields which can be meaningfully aggregated
    def to_dict(self):
        data = model_to_dict(self)
        data["feed_type"] = self.feed_type.id
        data["qm_ruminant"] = self.qm_ruminant  # property access, not a call
        return data
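# --- Illustrative worked example (not part of the original models) ---
# Feed.qm_ruminant applies the Yan & Agnew (2004) regression.  With a
# hypothetical ADF value of 30, the ME/GE ratio works out as
# (-0.000796 * 30) + 0.827 = 0.80312.
def _qm_ruminant_example(adf=30.0):
    return (-0.000796 * adf) + 0.827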
class FeedType(ScenarioSpecificBase):
minInclusion = models.FloatField(null=True)
maxInclusion = models.FloatField(null=True)
    def to_dict(self):
        data = {}
        data["id"] = self.id
        data["minInclusion"] = self.minInclusion
        data["maxInclusion"] = self.maxInclusion
        return data
# class BreedDetails(models.Model):
# breed = models.CharField(max_length=20, unique=True)
#
# # Fox et. al. 1998
# MM = models.FloatField(null=True) # maintenance multiplier non-lactating
# MML = models.FloatField(null=True) # maintenance multiplier lactating
# BW = models.FloatField(null=True) # Calf Birth Weight kg
# peak_yield = models.FloatField(null=True) # Average peak milk yield kg
# BW_adjustment_Q1 = models.FloatField(null=True) # Q1 Birth weight adjustment/age of dam yr
# BW_adjustment_Q2 = models.FloatField(null=True) # Q2 Birth weight adjustment/age of dam yr
# BW_adjustment_Q3 = models.FloatField(null=True) # Q3 Birth weight adjustment/age of dam yr
# BW_adjustment_Q4 = models.FloatField(null=True) # Q4 Birth weight adjustment/age of dam yr
| 35.881356
| 123
| 0.684459
|
6f49e4fe32a624d0b2fb3bf6756882dd1db0e28a
| 14,050
|
py
|
Python
|
altair/utils/core.py
|
hugovk/altair
|
a3c9f06790f7a8c5c7e2c98278d0f69e4630b5be
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T21:42:09.000Z
|
2022-03-13T21:42:09.000Z
|
altair/utils/core.py
|
RoyMachineLearning/altair
|
74a765b373694776e63d224d99536975cc173810
|
[
"BSD-3-Clause"
] | null | null | null |
altair/utils/core.py
|
RoyMachineLearning/altair
|
74a765b373694776e63d224d99536975cc173810
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Utility routines
"""
import collections
from copy import deepcopy
import itertools
import re
import sys
import traceback
import warnings
import six
import pandas as pd
import numpy as np
try:
from pandas.api.types import infer_dtype
except ImportError: # Pandas before 0.20.0
from pandas.lib import infer_dtype
from .schemapi import SchemaBase, Undefined
TYPECODE_MAP = {'ordinal': 'O',
'nominal': 'N',
'quantitative': 'Q',
'temporal': 'T'}
INV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}
# aggregates from vega-lite version 2.4.3
AGGREGATES = ['argmax', 'argmin', 'average', 'count', 'distinct', 'max',
'mean', 'median', 'min', 'missing', 'q1', 'q3', 'ci0', 'ci1',
'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values',
'variance', 'variancep']
# window aggregates from vega-lite version 2.5.2
WINDOW_AGGREGATES = ["row_number", "rank", "dense_rank", "percent_rank",
"cume_dist", "ntile", "lag", "lead", "first_value",
"last_value", "nth_value"]
# timeUnits from vega-lite version 2.4.3
TIMEUNITS = ["utcyear", "utcquarter", "utcmonth", "utcday", "utcdate",
"utchours", "utcminutes", "utcseconds", "utcmilliseconds",
"utcyearquarter", "utcyearquartermonth", "utcyearmonth",
"utcyearmonthdate", "utcyearmonthdatehours",
"utcyearmonthdatehoursminutes",
"utcyearmonthdatehoursminutesseconds",
"utcquartermonth", "utcmonthdate", "utchoursminutes",
"utchoursminutesseconds", "utcminutesseconds",
"utcsecondsmilliseconds",
"year", "quarter", "month", "day", "date", "hours", "minutes",
"seconds", "milliseconds", "yearquarter", "yearquartermonth",
"yearmonth", "yearmonthdate", "yearmonthdatehours",
"yearmonthdatehoursminutes",
"yearmonthdatehoursminutesseconds", "quartermonth", "monthdate",
"hoursminutes", "hoursminutesseconds", "minutesseconds",
"secondsmilliseconds"]
def infer_vegalite_type(data):
"""
From an array-like input, infer the correct vega typecode
('ordinal', 'nominal', 'quantitative', or 'temporal')
Parameters
----------
data: Numpy array or Pandas Series
"""
# Otherwise, infer based on the dtype of the input
typ = infer_dtype(data)
# TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py
if typ in ['floating', 'mixed-integer-float', 'integer',
'mixed-integer', 'complex']:
return 'quantitative'
elif typ in ['string', 'bytes', 'categorical', 'boolean', 'mixed', 'unicode']:
return 'nominal'
elif typ in ['datetime', 'datetime64', 'timedelta',
'timedelta64', 'date', 'time', 'period']:
return 'temporal'
else:
warnings.warn("I don't know how to infer vegalite type from '{0}'. "
"Defaulting to nominal.".format(typ))
return 'nominal'
def sanitize_dataframe(df):
"""Sanitize a DataFrame to prepare it for serialization.
* Make a copy
* Raise ValueError if it has a hierarchical index.
* Convert categoricals to strings.
* Convert np.bool_ dtypes to Python bool objects
* Convert np.int dtypes to Python int objects
* Convert floats to objects and replace NaNs/infs with None.
* Convert DateTime dtypes into appropriate string representations
* Raise a ValueError for TimeDelta dtypes
"""
df = df.copy()
if isinstance(df.index, pd.core.index.MultiIndex):
raise ValueError('Hierarchical indices not supported')
if isinstance(df.columns, pd.core.index.MultiIndex):
raise ValueError('Hierarchical indices not supported')
def to_list_if_array(val):
if isinstance(val, np.ndarray):
return val.tolist()
else:
return val
for col_name, dtype in df.dtypes.iteritems():
if str(dtype) == 'category':
# XXXX: work around bug in to_json for categorical types
# https://github.com/pydata/pandas/issues/10778
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif str(dtype) == 'bool':
# convert numpy bools to objects; np.bool is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif str(dtype).startswith('datetime'):
# Convert datetimes to strings. This needs to be a full ISO string
# with time, which is why we cannot use ``col.astype(str)``.
# This is because Javascript parses date-only times in UTC, but
# parses full ISO-8601 dates as local time, and dates in Vega and
# Vega-Lite are displayed in local time by default.
# (see https://github.com/altair-viz/altair/issues/1027)
df[col_name] = df[col_name].apply(lambda x: x.isoformat()).replace('NaT', '')
elif str(dtype).startswith('timedelta'):
raise ValueError('Field "{col_name}" has type "{dtype}" which is '
'not supported by Altair. Please convert to '
'either a timestamp or a numerical value.'
''.format(col_name=col_name, dtype=dtype))
elif np.issubdtype(dtype, np.integer):
# convert integers to objects; np.int is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif np.issubdtype(dtype, np.floating):
# For floats, convert to Python float: np.float is not JSON serializable
# Also convert NaN/inf values to null, as they are not JSON serializable
col = df[col_name]
bad_values = col.isnull() | np.isinf(col)
df[col_name] = col.astype(object).where(~bad_values, None)
elif dtype == object:
# Convert numpy arrays saved as objects to lists
# Arrays are not JSON serializable
col = df[col_name].apply(to_list_if_array, convert_dtype=False)
df[col_name] = col.where(col.notnull(), None)
return df
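# --- Illustrative sketch (not part of the original module) ---
# A minimal round trip through sanitize_dataframe: the categorical column
# is converted to plain objects and the NaN becomes None (JSON-friendly).
def _sanitize_dataframe_example():
    frame = pd.DataFrame({'cat': pd.Categorical(['a', 'b']),
                          'val': [1.0, np.nan]})
    clean = sanitize_dataframe(frame)
    assert clean['val'].tolist() == [1.0, None]
    assert clean['cat'].tolist() == ['a', 'b']
    return clean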
def parse_shorthand(shorthand, data=None, parse_aggregates=True,
parse_window_ops=False,
parse_timeunits=True, parse_types=True):
"""General tool to parse shorthand values
These are of the form:
- "col_name"
- "col_name:O"
- "average(col_name)"
- "average(col_name):O"
Optionally, a dataframe may be supplied, from which the type
will be inferred if not specified in the shorthand.
Parameters
----------
shorthand : dict or string
The shorthand representation to be parsed
data : DataFrame, optional
If specified and of type DataFrame, then use these values to infer the
column type if not provided by the shorthand.
parse_aggregates : boolean
If True (default), then parse aggregate functions within the shorthand.
parse_window_ops : boolean
If True then parse window operations within the shorthand (default:False)
parse_timeunits : boolean
If True (default), then parse timeUnits from within the shorthand
parse_types : boolean
If True (default), then parse typecodes within the shorthand
Returns
-------
attrs : dict
a dictionary of attributes extracted from the shorthand
Examples
--------
>>> data = pd.DataFrame({'foo': ['A', 'B', 'A', 'B'],
... 'bar': [1, 2, 3, 4]})
>>> parse_shorthand('name') == {'field': 'name'}
True
    >>> parse_shorthand('name:Q') == {'field': 'name', 'type': 'quantitative'}
True
>>> parse_shorthand('average(col)') == {'aggregate': 'average', 'field': 'col'}
True
>>> parse_shorthand('foo:O') == {'field': 'foo', 'type': 'ordinal'}
True
>>> parse_shorthand('min(foo):Q') == {'aggregate': 'min', 'field': 'foo', 'type': 'quantitative'}
True
>>> parse_shorthand('month(col)') == {'field': 'col', 'timeUnit': 'month', 'type': 'temporal'}
True
>>> parse_shorthand('year(col):O') == {'field': 'col', 'timeUnit': 'year', 'type': 'ordinal'}
True
>>> parse_shorthand('foo', data) == {'field': 'foo', 'type': 'nominal'}
True
>>> parse_shorthand('bar', data) == {'field': 'bar', 'type': 'quantitative'}
True
>>> parse_shorthand('bar:O', data) == {'field': 'bar', 'type': 'ordinal'}
True
>>> parse_shorthand('sum(bar)', data) == {'aggregate': 'sum', 'field': 'bar', 'type': 'quantitative'}
True
>>> parse_shorthand('count()', data) == {'aggregate': 'count', 'type': 'quantitative'}
True
"""
if not shorthand:
return {}
valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)
units = dict(field='(?P<field>.*)',
type='(?P<type>{0})'.format('|'.join(valid_typecodes)),
count='(?P<aggregate>count)',
aggregate='(?P<aggregate>{0})'.format('|'.join(AGGREGATES)),
window_op='(?P<op>{0})'.format('|'.join(AGGREGATES + WINDOW_AGGREGATES)),
timeUnit='(?P<timeUnit>{0})'.format('|'.join(TIMEUNITS)))
patterns = []
if parse_aggregates or parse_window_ops:
patterns.extend([r'{count}\(\)'])
if parse_aggregates:
patterns.extend([r'{aggregate}\({field}\)'])
if parse_window_ops:
patterns.extend([r'{window_op}\({field}\)'])
if parse_timeunits:
patterns.extend([r'{timeUnit}\({field}\)'])
patterns.extend([r'{field}'])
if parse_types:
patterns = list(itertools.chain(*((p + ':{type}', p) for p in patterns)))
    regexps = (re.compile(r'\A' + p.format(**units) + r'\Z', re.DOTALL)
               for p in patterns)
# find matches depending on valid fields passed
if isinstance(shorthand, dict):
attrs = shorthand
else:
attrs = next(exp.match(shorthand).groupdict() for exp in regexps
if exp.match(shorthand))
# Handle short form of the type expression
if 'type' in attrs:
attrs['type'] = INV_TYPECODE_MAP.get(attrs['type'], attrs['type'])
# counts are quantitative by default
if attrs == {'aggregate': 'count'}:
attrs['type'] = 'quantitative'
# times are temporal by default
if 'timeUnit' in attrs and 'type' not in attrs:
attrs['type'] = 'temporal'
# if data is specified and type is not, infer type from data
if isinstance(data, pd.DataFrame) and 'type' not in attrs:
if 'field' in attrs and attrs['field'] in data.columns:
attrs['type'] = infer_vegalite_type(data[attrs['field']])
return attrs
def use_signature(Obj):
"""Apply call signature and documentation of Obj to the decorated method"""
def decorate(f):
# call-signature of f is exposed via __wrapped__.
# we want it to mimic Obj.__init__
f.__wrapped__ = Obj.__init__
f._uses_signature = Obj
# Supplement the docstring of f with information from Obj
doclines = Obj.__doc__.splitlines()
if f.__doc__:
doc = f.__doc__ + '\n'.join(doclines[1:])
else:
doc = '\n'.join(doclines)
try:
f.__doc__ = doc
except AttributeError:
# __doc__ is not modifiable for classes in Python < 3.3
pass
return f
return decorate
def update_subtraits(obj, attrs, **kwargs):
"""Recursively update sub-traits without overwriting other traits"""
# TODO: infer keywords from args
if not kwargs:
return obj
# obj can be a SchemaBase object or a dict
if obj is Undefined:
obj = dct = {}
elif isinstance(obj, SchemaBase):
dct = obj._kwds
else:
dct = obj
if isinstance(attrs, six.string_types):
attrs = (attrs,)
if len(attrs) == 0:
dct.update(kwargs)
else:
attr = attrs[0]
trait = dct.get(attr, Undefined)
if trait is Undefined:
trait = dct[attr] = {}
dct[attr] = update_subtraits(trait, attrs[1:], **kwargs)
return obj
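# --- Illustrative example (not part of the original module) ---
# update_subtraits walks the given attribute path and merges the keyword
# arguments at the leaf without clobbering sibling keys.  With plain dicts:
#
#   >>> update_subtraits({'axis': {'grid': True}}, 'axis', title='x')
#   {'axis': {'grid': True, 'title': 'x'}}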
def update_nested(original, update, copy=False):
"""Update nested dictionaries
Parameters
----------
original : dict
the original (nested) dictionary, which will be updated in-place
update : dict
the nested dictionary of updates
copy : bool, default False
if True, then copy the original dictionary rather than modifying it
Returns
-------
original : dict
a reference to the (modified) original dict
Examples
--------
>>> original = {'x': {'b': 2, 'c': 4}}
>>> update = {'x': {'b': 5, 'd': 6}, 'y': 40}
>>> update_nested(original, update) # doctest: +SKIP
{'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
>>> original # doctest: +SKIP
{'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
"""
if copy:
original = deepcopy(original)
for key, val in update.items():
if isinstance(val, collections.Mapping):
orig_val = original.get(key, {})
if isinstance(orig_val, collections.Mapping):
original[key] = update_nested(orig_val, val)
else:
original[key] = val
else:
original[key] = val
return original
def write_file_or_filename(fp, content, mode='w'):
"""Write content to fp, whether fp is a string or a file-like object"""
if isinstance(fp, six.string_types):
with open(fp, mode) as f:
f.write(content)
else:
fp.write(content)
def display_traceback(in_ipython=True):
exc_info = sys.exc_info()
if in_ipython:
from IPython.core.getipython import get_ipython
ip = get_ipython()
else:
ip = None
if ip is not None:
ip.showtraceback(exc_info)
else:
traceback.print_exception(*exc_info)
| 35.037406
| 105
| 0.59879
|
8c6c7365d4f3324c2cb7d68c436f41bfb9d363f4
| 1,492
|
py
|
Python
|
app/obfuscators/base64_jumble.py
|
ManojKorrapati/robinhood
|
c12c136974f9ee42823952f995def203bfb9b5e7
|
[
"Apache-2.0"
] | null | null | null |
app/obfuscators/base64_jumble.py
|
ManojKorrapati/robinhood
|
c12c136974f9ee42823952f995def203bfb9b5e7
|
[
"Apache-2.0"
] | null | null | null |
app/obfuscators/base64_jumble.py
|
ManojKorrapati/robinhood
|
c12c136974f9ee42823952f995def203bfb9b5e7
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
import binascii
from base64 import b64encode
from app.utility.base_obfuscator import BaseObfuscator
class Obfuscation(BaseObfuscator):
@property
def supported_platforms(self):
return dict(
windows=['psh'],
darwin=['sh'],
linux=['sh']
)
def run(self, link, **kwargs):
cmd, extra = self._jumble_command(link.command)
link.command = cmd
return super().run(link, extra=extra)
@staticmethod
def sh(link, **kwargs):
extra_chars = kwargs.get('extra')
return 'eval "$(echo %s | rev | cut -c%s- | rev | base64 --decode)"' % (link.command, extra_chars)
def psh(self, link, **kwargs):
extra_chars = kwargs.get('extra') + 1
try:
recoded = b64encode(self.decode_bytes(link.command).encode('UTF-16LE'))
except binascii.Error: # Resolve issue where we can't decode our own mangled command internally
recoded = b64encode(self.decode_bytes(link.command[:-(extra_chars-1)]).encode('UTF-16LE'))
return 'powershell -Enc %s.Substring(0,%s)' % (recoded.decode('utf-8'), len(link.command)-extra_chars)
""" PRIVATE """
def _jumble_command(self, s):
extra = 0
while self.is_base64(s):
s = s + self._random_char()
extra += 1
return s, extra
@staticmethod
def _random_char():
return random.choice(string.ascii_letters + string.digits)
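# --- Illustrative sketch (not part of the original module) ---
# The jumble works by appending characters until the payload stops being
# valid base64; the receiving side strips the same number of trailing
# characters before decoding.  A hypothetical round trip:
def _jumble_round_trip_example(extra_chars=3):
    payload = b64encode(b'whoami').decode()       # valid base64: 'd2hvYW1p'
    mangled = payload + 'x' * extra_chars         # length no longer % 4 == 0
    recovered = binascii.a2b_base64(mangled[:-extra_chars])
    return recovered                              # b'whoami'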
| 29.84
| 110
| 0.612601
|
1c4a0541e9c6f3dabd3305439a3287d532a147dd
| 1,361
|
py
|
Python
|
app/core/tests/test_admin.py
|
kim-sun/recipe-app-api
|
c0c598f2188c42c820178ea7910c34ccdf641393
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
kim-sun/recipe-app-api
|
c0c598f2188c42c820178ea7910c34ccdf641393
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
kim-sun/recipe-app-api
|
c0c598f2188c42c820178ea7910c34ccdf641393
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@gmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@gmail.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/{id}
res = self.client.get(url) # response
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.651163
| 68
| 0.635562
|
5d99322867bc078254e638019a748163e15b8f86
| 107,218
|
py
|
Python
|
wildlifecompliance/components/applications/api.py
|
Djandwich/wildlifecompliance
|
ca296798526a56ce67ffc2f7e8ebdbae95077e6d
|
[
"Apache-2.0"
] | null | null | null |
wildlifecompliance/components/applications/api.py
|
Djandwich/wildlifecompliance
|
ca296798526a56ce67ffc2f7e8ebdbae95077e6d
|
[
"Apache-2.0"
] | null | null | null |
wildlifecompliance/components/applications/api.py
|
Djandwich/wildlifecompliance
|
ca296798526a56ce67ffc2f7e8ebdbae95077e6d
|
[
"Apache-2.0"
] | null | null | null |
import traceback
import os
import logging
from datetime import datetime, timedelta
from django.db.models import Q
from django.db import transaction
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.exceptions import ValidationError
from rest_framework import viewsets, serializers, status, views
from rest_framework.decorators import (
detail_route, list_route, renderer_classes
)
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from ledger.accounts.models import EmailUser
from ledger.checkout.utils import calculate_excl_gst
from django.urls import reverse
from django.shortcuts import redirect, render
from wildlifecompliance.components.applications.utils import (
SchemaParser,
MissingFieldsException,
)
from wildlifecompliance.components.main.utils import (
checkout,
set_session_application,
set_session_activity,
delete_session_application
)
from wildlifecompliance.helpers import is_customer, is_internal
from wildlifecompliance.components.applications.email import (
send_application_amendment_notification,
)
from wildlifecompliance.components.applications.models import (
Application,
ApplicationSelectedActivity,
ApplicationCondition,
ApplicationStandardCondition,
Assessment,
ActivityPermissionGroup,
AmendmentRequest,
ApplicationUserAction,
ApplicationFormDataRecord,
ApplicationInvoice,
ApplicationSelectedActivityPurpose,
)
from wildlifecompliance.components.applications.services import (
ApplicationService,
CheckboxAndRadioButtonVisitor,
SpeciesOptionsFieldElement,
StandardConditionFieldElement,
PromptInspectionFieldElement,
TSCSpecieService,
HerbieSpecieKMICall,
)
from wildlifecompliance.components.applications.serializers import (
ApplicationSerializer,
InternalApplicationSerializer,
SaveApplicationSerializer,
BaseApplicationSerializer,
CreateExternalApplicationSerializer,
DTInternalApplicationSerializer,
DTExternalApplicationSerializer,
ApplicationUserActionSerializer,
ApplicationLogEntrySerializer,
ApplicationConditionSerializer,
ApplicationStandardConditionSerializer,
ProposedLicenceSerializer,
ProposedDeclineSerializer,
AssessmentSerializer,
ActivityPermissionGroupSerializer,
SaveAssessmentSerializer,
SimpleSaveAssessmentSerializer,
AmendmentRequestSerializer,
ApplicationProposedIssueSerializer,
DTAssessmentSerializer,
ApplicationSelectedActivitySerializer,
ValidCompleteAssessmentSerializer,
DTExternalApplicationSelectedActivitySerializer,
DTInternalApplicationSelectedActivitySerializer,
IssueLicenceSerializer,
DTApplicationSelectSerializer,
)
from wildlifecompliance.components.main.process_document import (
process_generic_document,
)
from rest_framework_datatables.pagination import DatatablesPageNumberPagination
from rest_framework_datatables.filters import DatatablesFilterBackend
from rest_framework_datatables.renderers import DatatablesRenderer
from wildlifecompliance.management.permissions_manager import PermissionUser
logger = logging.getLogger(__name__)
# logger = logging
def application_refund_callback(invoice_ref, bpoint_tid):
'''
    Callback routine for Ledger when a refund transaction occurs.
    Required to update the payment status on the application, as this
    property is cached and can only be updated on save.
'''
logger.info(
'application_refund_callback: Inv {0}'.format(invoice_ref)
)
AMENDMENT = Application.APPLICATION_TYPE_AMENDMENT
DISCARDED = Application.CUSTOMER_STATUS_DRAFT
try:
ai = ApplicationInvoice.objects.filter(
invoice_reference=invoice_ref
)
with transaction.atomic():
for i in ai:
                '''
                Check whether the invoice is for an amendment application, as
                refunds are paid back to the previous application's invoice -
                a save will be applied to both applications.
                '''
amend = Application.objects.filter(
previous_application_id=i.application_id,
application_type=AMENDMENT,
).exclude(
customer_status=DISCARDED,
).first()
if (amend):
logger.info('refund_callback amendID {0}'.format(amend))
amend.set_property_cache_refund_invoice(ai)
amend.save()
i.application.set_property_cache_refund_invoice(ai)
i.application.save()
except Exception as e:
logger.error(
'app_refund_callback(): Inv {0} - {1}'.format(invoice_ref, e)
)
def application_invoice_callback(invoice_ref):
'''
Callback routine for Ledger when record transaction.
Required to update payment status on application as this property is
cached and can only be updated on save.
'''
logger.info(
'application_invoice_callback: Inv {0}'.format(invoice_ref)
)
AMENDMENT = Application.APPLICATION_TYPE_AMENDMENT
CASH = ApplicationInvoice.OTHER_PAYMENT_METHOD_CASH
DISCARDED = Application.CUSTOMER_STATUS_DRAFT
try:
ai = ApplicationInvoice.objects.filter(
invoice_reference=invoice_ref
)
with transaction.atomic():
for i in ai:
                '''
                Check for cash-payment invoices on amendments, as recording a
                refund causes invoice_callback() to be applied. A save is made
                on both applications.
                NOTE: a ledger refund cannot be applied to an invoice for a
                recorded cash payment - it can only be recorded as a refund
                amount.
                '''
if i.other_payment_method == CASH:
amend = Application.objects.filter(
previous_application_id=i.application_id,
application_type=AMENDMENT,
).exclude(
customer_status=DISCARDED,
).first()
if amend and amend.requires_refund_amendment():
logger.info('inv_callback amendID {0}'.format(amend))
amend.set_property_cache_refund_invoice(ai)
amend.save()
if int(i.application.application_fee) < 0:
i.application.set_property_cache_refund_invoice(ai)
i.application.save()
except Exception as e:
logger.error(
'app_invoice_callback(): Inv {0} - {1}'.format(invoice_ref, e)
)
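# --- Illustrative note (not part of the original module) ---
# Both callbacks exist because the application's payment status is a cached
# property that is only refreshed inside save().  Stripped of the amendment
# handling, the pattern is simply:
#
#   for ai in ApplicationInvoice.objects.filter(invoice_reference=ref):
#       ai.application.save()   # save() recomputes the cached payment status
#
# so Ledger only needs to hand back the invoice reference.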
class GetEmptyList(views.APIView):
renderer_classes = [JSONRenderer, ]
def get(self, request, format=None):
return Response([])
class ApplicationFilterBackend(DatatablesFilterBackend):
"""
Custom filters
"""
def filter_queryset(self, request, queryset, view):
# Get built-in DRF datatables queryset first to join with search text,
# then apply additional filters.
super_queryset = super(ApplicationFilterBackend, self).filter_queryset(
request, queryset, view
).distinct()
total_count = queryset.count()
date_from = request.GET.get('date_from')
date_to = request.GET.get('date_to')
category_name = request.GET.get('category_name')
processing_status = request.GET.get('processing_status')
customer_status = request.GET.get('customer_status')
status_filter = request.GET.get('status')
submitter = request.GET.get('submitter')
search_text = request.GET.get('search[value]')
if queryset.model is Application:
# search_text filter, join all custom search columns
            # where ('searchable: false' in the datatable definition)
if search_text:
search_text = search_text.lower()
# join queries for the search_text search
# search_text_app_ids = []
search_text_app_ids = Application.objects.values(
'id'
).filter(
Q(proxy_applicant__first_name__icontains=search_text) |
Q(proxy_applicant__last_name__icontains=search_text)
)
# use pipe to join both custom and built-in DRF datatables
# querysets (returned by super call above)
# (otherwise they will filter on top of each other)
queryset = queryset.filter(
id__in=search_text_app_ids
).distinct() | super_queryset
# apply user selected filters
category_name = category_name.lower() if category_name else 'all'
if category_name != 'all':
# category_name_app_ids = []
category_name_app_ids = Application.objects.values(
'id'
).filter(
selected_activities__licence_activity__licence_category__name__icontains=category_name
)
queryset = queryset.filter(id__in=category_name_app_ids)
processing_status = processing_status.lower() if processing_status else 'all'
if processing_status != 'all':
if processing_status \
== Application.CUSTOMER_STATUS_UNDER_REVIEW:
exclude = [
ApplicationSelectedActivity.PROCESSING_STATUS_DRAFT,
ApplicationSelectedActivity.PROCESSING_STATUS_AWAITING_LICENCE_FEE_PAYMENT,
ApplicationSelectedActivity.PROCESSING_STATUS_ACCEPTED,
ApplicationSelectedActivity.PROCESSING_STATUS_DECLINED,
ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED,
]
processing_status_app_ids = Application.objects.values(
'id'
).filter().exclude(
selected_activities__processing_status__in=exclude,
)
elif processing_status \
== Application.CUSTOMER_STATUS_AWAITING_PAYMENT:
include = [
ApplicationSelectedActivity.PROCESSING_STATUS_AWAITING_LICENCE_FEE_PAYMENT,
]
processing_status_app_ids = Application.objects.values(
'id'
).filter(
selected_activities__processing_status__in=include,
)
elif processing_status \
== Application.CUSTOMER_STATUS_PARTIALLY_APPROVED:
include = [
Application.CUSTOMER_STATUS_PARTIALLY_APPROVED,
]
processing_status_app_ids = Application.objects.values(
'id'
).filter(
customer_status__in=include,
)
else:
processing_status_app_ids = Application.objects.values(
'id'
).filter(
selected_activities__processing_status__in=[
processing_status
]
)
queryset = queryset.filter(id__in=processing_status_app_ids)
customer_status = customer_status.lower() if customer_status else 'all'
if customer_status != 'all':
customer_status_app_ids = []
for application in queryset:
if customer_status in application.customer_status.lower():
customer_status_app_ids.append(application.id)
queryset = queryset.filter(id__in=customer_status_app_ids)
if date_from:
queryset = queryset.filter(lodgement_date__gte=date_from)
if date_to:
date_to = datetime.strptime(date_to, '%Y-%m-%d') + timedelta(days=1)
queryset = queryset.filter(lodgement_date__lte=date_to)
submitter = submitter.lower() if submitter else 'all'
if submitter != 'all':
queryset = queryset.filter(submitter__email__iexact=submitter)
if queryset.model is Assessment:
# search_text filter, join all custom search columns
# where ('searchable: false' in the datatable definition)
if search_text:
search_text = search_text.lower()
# join queries for the search_text search
search_text_ass_ids = []
for assessment in queryset:
if (search_text in assessment.application.licence_category.lower()
or search_text in assessment.licence_activity.short_name.lower()
or search_text in assessment.application.applicant.lower()
or search_text in assessment.get_status_display().lower()
):
search_text_ass_ids.append(assessment.id)
# if applicant is not an organisation, also search against the user's email address
if (assessment.application.applicant_type == Application.APPLICANT_TYPE_PROXY and
search_text in assessment.application.proxy_applicant.email.lower()):
search_text_ass_ids.append(assessment.id)
if (assessment.application.applicant_type == Application.APPLICANT_TYPE_SUBMITTER and
search_text in assessment.application.submitter.email.lower()):
search_text_ass_ids.append(assessment.id)
# use pipe to join both custom and built-in DRF datatables querysets (returned by super call above)
# (otherwise they will filter on top of each other)
queryset = queryset.filter(id__in=search_text_ass_ids).distinct() | super_queryset
# apply user selected filters
category_name = category_name.lower() if category_name else 'all'
if category_name != 'all':
category_name_app_ids = []
for assessment in queryset:
if category_name in assessment.application.licence_category_name.lower():
category_name_app_ids.append(assessment.id)
queryset = queryset.filter(id__in=category_name_app_ids)
status_filter = status_filter.lower() if status_filter else 'all'
if status_filter != 'all':
queryset = queryset.filter(status=status_filter)
if date_from:
queryset = queryset.filter(application__lodgement_date__gte=date_from)
if date_to:
date_to = datetime.strptime(date_to, '%Y-%m-%d') + timedelta(days=1)
queryset = queryset.filter(application__lodgement_date__lte=date_to)
submitter = submitter.lower() if submitter else 'all'
if submitter != 'all':
queryset = queryset.filter(application__submitter__email__iexact=submitter)
# override queryset ordering, required because the ordering is usually handled
# in the super call, but is then clobbered by the custom queryset joining above
# also needed to disable ordering for all fields for which data is not an
# Application model field, as property functions will not work with order_by
getter = request.query_params.get
fields = self.get_fields(getter)
ordering = self.get_ordering(getter, fields)
if len(ordering):
queryset = queryset.order_by(*ordering)
setattr(view, '_datatables_total_count', total_count)
return queryset
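# --- Illustrative note (not part of the original module) ---
# The search handling above relies on Django's queryset OR operator to merge
# a hand-built id filter with the queryset returned by the DRF-datatables
# super call.  In isolation the pattern is (hypothetical variable names):
#
#   custom = Application.objects.filter(id__in=matching_ids)
#   combined = (custom | super_queryset).distinct()
#
# Chaining .filter() calls instead would AND the conditions together rather
# than OR-ing them, which is why the union is used.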
class ApplicationRenderer(DatatablesRenderer):
def render(self, data, accepted_media_type=None, renderer_context=None):
if 'view' in renderer_context and hasattr(renderer_context['view'], '_datatables_total_count'):
data['recordsTotal'] = renderer_context['view']._datatables_total_count
return super(ApplicationRenderer, self).render(data, accepted_media_type, renderer_context)
class ApplicationPaginatedViewSet(viewsets.ModelViewSet):
filter_backends = (ApplicationFilterBackend,)
pagination_class = DatatablesPageNumberPagination
renderer_classes = (ApplicationRenderer,)
queryset = Application.objects.none()
serializer_class = DTExternalApplicationSerializer
page_size = 10
def get_queryset(self):
user = self.request.user
if is_internal(self.request):
return Application.objects.all()\
.exclude(application_type=Application.APPLICATION_TYPE_SYSTEM_GENERATED)
elif is_customer(self.request):
user_orgs = [
org.id for org in user.wildlifecompliance_organisations.all()]
return Application.objects.filter(Q(org_applicant_id__in=user_orgs) | Q(
proxy_applicant=user) | Q(submitter=user))\
.exclude(application_type=Application.APPLICATION_TYPE_SYSTEM_GENERATED)
return Application.objects.none()
@list_route(methods=['GET', ])
def internal_datatable_list(self, request, *args, **kwargs):
self.serializer_class = DTInternalApplicationSerializer
queryset = self.get_queryset()
# Filter by org
org_id = request.GET.get('org_id', None)
if org_id:
queryset = queryset.filter(org_applicant_id=org_id)
# Filter by proxy_applicant
proxy_applicant_id = request.GET.get('proxy_applicant_id', None)
if proxy_applicant_id:
queryset = queryset.filter(proxy_applicant_id=proxy_applicant_id)
# Filter by submitter
submitter_id = request.GET.get('submitter_id', None)
if submitter_id:
queryset = queryset.filter(submitter_id=submitter_id)
# Filter by user (submitter or proxy_applicant)
user_id = request.GET.get('user_id', None)
if user_id:
user_orgs = [
org.id for org in EmailUser.objects.get(id=user_id).wildlifecompliance_organisations.all()]
queryset = queryset.filter(
Q(proxy_applicant=user_id) |
Q(submitter=user_id) |
Q(org_applicant_id__in=user_orgs)
)
queryset = self.filter_queryset(queryset)
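# Setting page_size to the full result count effectively disables paging,
# so the entire filtered set is returned in a single page.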
self.paginator.page_size = queryset.count()
result_page = self.paginator.paginate_queryset(queryset, request)
serializer = DTInternalApplicationSerializer(result_page, context={'request': request}, many=True)
response = self.paginator.get_paginated_response(serializer.data)
return response
@list_route(methods=['GET', ])
def external_datatable_list(self, request, *args, **kwargs):
queryset = self.get_queryset()
# Filter by org
org_id = request.GET.get('org_id', None)
if org_id:
queryset = queryset.filter(org_applicant_id=org_id)
# Filter by proxy_applicant
proxy_applicant_id = request.GET.get('proxy_applicant_id', None)
if proxy_applicant_id:
queryset = queryset.filter(proxy_applicant_id=proxy_applicant_id)
# Filter by submitter
submitter_id = request.GET.get('submitter_id', None)
if submitter_id:
queryset = queryset.filter(submitter_id=submitter_id)
self.serializer_class = DTExternalApplicationSerializer
user_orgs = [
org.id for org in request.user.wildlifecompliance_organisations.all()]
queryset = queryset.filter(
Q(submitter=request.user) |
Q(proxy_applicant=request.user) |
Q(org_applicant_id__in=user_orgs)
).computed_exclude(
processing_status=Application.PROCESSING_STATUS_DISCARDED
).distinct()
queryset = self.filter_queryset(queryset)
self.paginator.page_size = queryset.count()
result_page = self.paginator.paginate_queryset(queryset, request)
serializer = DTExternalApplicationSerializer(result_page, context={'request': request}, many=True)
return self.paginator.get_paginated_response(serializer.data)
class ApplicationViewSet(viewsets.ModelViewSet):
queryset = Application.objects.all()
serializer_class = ApplicationSerializer
def get_queryset(self):
user = self.request.user
if is_internal(self.request):
return Application.objects.all()
elif is_customer(self.request):
user_orgs = [
org.id for org in user.wildlifecompliance_organisations.all()]
return Application.objects.filter(Q(org_applicant_id__in=user_orgs) | Q(
proxy_applicant=user) | Q(submitter=user))
return Application.objects.none()
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
serializer = BaseApplicationSerializer(
queryset, many=True, context={'request': request})
return Response(serializer.data)
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
def process_document(self, request, *args, **kwargs):
try:
instance = self.get_object()
action = request.POST.get('action')
section = request.POST.get('input_name')
if action == 'list' and 'input_name' in request.POST:
pass
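# 'list' simply falls through to the document listing response built below.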
elif action == 'delete' and 'document_id' in request.POST:
document_id = request.POST.get('document_id')
document = instance.documents.get(id=document_id)
if document._file and os.path.isfile(
document._file.path) and document.can_delete:
os.remove(document._file.path)
document.delete()
instance.save(version_comment='Approval File Deleted: {}'.format(
document.name)) # to allow revision to be added to reversion history
elif action == 'save' and 'input_name' in request.POST and 'filename' in request.POST:
application_id = request.POST.get('application_id')
filename = request.POST.get('filename')
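# The file content may arrive inline in the POST data or as a multipart
# upload in request.FILES; check both.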
_file = request.POST.get('_file')
if not _file:
_file = request.FILES.get('_file')
document = instance.documents.get_or_create(
input_name=section, name=filename)[0]
path = default_storage.save(
'applications/{}/documents/{}'.format(
application_id, filename), ContentFile(
_file.read()))
document._file = path
document.save()
# to allow revision to be added to reversion history
instance.save(
version_comment='File Added: {}'.format(filename))
return Response(
[
dict(
input_name=d.input_name,
name=d.name,
file=d._file.url,
id=d.id,
can_delete=d.can_delete) for d in instance.documents.filter(
input_name=section) if d._file])
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
# raise serializers.ValidationError(repr(e[0].encode('utf-8')))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def action_log(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.action_logs.all()
serializer = ApplicationUserActionSerializer(qs, many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def comms_log(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.comms_logs.all()
serializer = ApplicationLogEntrySerializer(qs, many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
@renderer_classes((JSONRenderer,))
def add_comms_log(self, request, *args, **kwargs):
try:
with transaction.atomic():
instance = self.get_object()
request_data = request.data.copy()
request_data['application'] = u'{}'.format(instance.id)
request_data['staff'] = u'{}'.format(request.user.id)
request_data['log_type'] = request.data['type']
serializer = ApplicationLogEntrySerializer(data=request_data)
serializer.is_valid(raise_exception=True)
comms = serializer.save()
# Save the files
for f in request.FILES:
document = comms.documents.create()
document.name = str(request.FILES[f])
document._file = request.FILES[f]
document.save()
# End Save Documents
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def get_application_selects(self, request, *args, **kwargs):
'''
Returns all drop-down lists for application dashboard.
'''
try:
instance = Application.objects.last()
serializer = DTApplicationSelectSerializer(
instance, context={'is_internal': is_internal(request)}
)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def conditions(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.conditions.all()
licence_activity = self.request.query_params.get(
'licence_activity', None)
if licence_activity is not None:
qs = qs.filter(licence_activity=licence_activity)
serializer = ApplicationConditionSerializer(
qs, many=True, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def assessments(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.assessments
serializer = AssessmentSerializer(qs, many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def assign_application_assessment(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.assign_application_assessment(request)
serializer = InternalApplicationSerializer(
instance, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def complete_application_assessments(self, request, *args, **kwargs):
try:
validator = ValidCompleteAssessmentSerializer(data=request.data)
validator.is_valid(raise_exception=True)
instance = self.get_object()
instance.complete_application_assessments_by_user(request)
serializer = InternalApplicationSerializer(
instance, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def add_assessment_inspection(self, request, *args, **kwargs):
try:
instance = self.get_object()
for assessment in instance.assessments:
if assessment.licence_activity.id == \
request.data.get('licence_activity_id'):
assessment.add_inspection(request)
serializer = InternalApplicationSerializer(
instance, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@list_route(methods=['GET', ])
def active_licence_application(self, request, *args, **kwargs):
active_application = Application.get_first_active_licence_application(
request
)
if not active_application:
return Response({'application': None})
serializer = DTExternalApplicationSerializer(
active_application, context={'request': request})
return Response({'application': serializer.data})
@list_route(methods=['POST', ])
def estimate_price(self, request, *args, **kwargs):
purpose_ids = request.data.get('purpose_ids', [])
application_id = request.data.get('application_id')
licence_type = request.data.get('licence_type')
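# Illustrative payload (field names taken from the request.data lookups in
# this method; values are hypothetical):
# {'purpose_ids': [1, 2], 'application_id': 123,
#  'licence_type': <licence type id>, 'field_data': {...}}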
with transaction.atomic():
if application_id is not None:
application = Application.objects.get(id=application_id)
return Response({
'fees': ApplicationService.calculate_fees(
application, request.data.get('field_data', {}))
})
return Response({
'fees': Application.calculate_base_fees(
purpose_ids, licence_type)
})
@list_route(methods=['GET', ])
def internal_datatable_list(self, request, *args, **kwargs):
queryset = self.get_queryset()
serializer = DTInternalApplicationSerializer(
queryset, many=True, context={'request': request})
return Response(serializer.data)
@list_route(methods=['GET', ])
def user_list(self, request, *args, **kwargs):
user_orgs = [
org.id for org in request.user.wildlifecompliance_organisations.all()]
queryset = self.get_queryset().filter(
Q(submitter=request.user) |
Q(proxy_applicant=request.user) |
Q(org_applicant_id__in=user_orgs)
).computed_exclude(
processing_status=Application.PROCESSING_STATUS_DISCARDED
).distinct()
serializer = DTExternalApplicationSerializer(
queryset, many=True, context={'request': request})
return Response(serializer.data)
@detail_route(methods=['GET', ])
def internal_application(self, request, *args, **kwargs):
logger.debug('ApplicationViewSet.internal_application() - start')
instance = self.get_object()
serializer = InternalApplicationSerializer(
instance, context={'request': request})
response = Response(serializer.data)
logger.debug('ApplicationViewSet.internal_application() - end')
return response
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def submit(self, request, *args, **kwargs):
try:
instance = self.get_object()
try:
instance.submit(request)
except MissingFieldsException as e:
return Response({
'missing': e.error_list},
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(instance)
return Response(serializer.data)
except serializers.ValidationError:
delete_session_application(request.session)
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
delete_session_application(request.session)
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def application_fee_checkout(self, request, *args, **kwargs):
import decimal
try:
checkout_result = None
instance = self.get_object()
with transaction.atomic():
product_lines = []
licence_fee = decimal.Decimal(
instance.get_property_cache_licence_fee() * 1)
if instance.application_fee < 1 and licence_fee < 1:
raise Exception('Checkout request for zero amount.')
application_submission = u'Application No: {}'.format(
instance.lodgement_number
)
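# Keep the application on the session for the checkout flow (presumably so it
# can be retrieved when payment returns; see delete_session_application usage
# elsewhere in this viewset).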
set_session_application(request.session, instance)
product_lines = ApplicationService.get_product_lines(instance)
checkout_result = checkout(
request, instance, lines=product_lines,
invoice_text=application_submission
)
return checkout_result
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def application_fee_reception(self, request, *args, **kwargs):
'''
Process payment of the application fee received and recorded by
licensing reception.
'''
try:
instance = self.get_object()
with transaction.atomic():
session = request.session
set_session_application(session, instance)
if instance.submit_type == Application.SUBMIT_TYPE_PAPER:
invoice = ApplicationService.cash_payment_submission(
request)
invoice_url = request.build_absolute_uri(
reverse(
'payments:invoice-pdf',
kwargs={'reference': invoice}))
elif instance.submit_type == Application.SUBMIT_TYPE_MIGRATE:
invoice = ApplicationService.none_payment_submission(
request)
invoice_url = None
else:
raise Exception('Cannot make this type of payment.')
# return template application-success
template_name = 'wildlifecompliance/application_success.html'
context = {
'application': instance,
'invoice_ref': invoice,
'invoice_url': invoice_url
}
return render(request, template_name, context)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def licence_fee_checkout(self, request, *args, **kwargs):
from wildlifecompliance.components.applications.payments import (
LicenceFeeClearingInvoice,
ApplicationFeePolicy,
)
PAY_STATUS = ApplicationSelectedActivity.PROCESSING_STATUS_AWAITING_LICENCE_FEE_PAYMENT
try:
instance = self.get_object()
activity_id = request.data.get('activity_id')
if not activity_id:
raise Exception('No activity selected for payment!')
product_lines = []
application_submission = u'Application No: {}'.format(
instance.lodgement_number)
activities = instance.selected_activities.all()
# store first activity on session for id.
set_session_activity(request.session, activities[0])
# Adjustments occurring only to the application fee.
# if instance.has_adjusted_fees or instance.has_additional_fees \
if instance.has_additional_fees \
or instance.has_payable_fees_at_finalisation:
# activities = instance.amended_activities
# only fees awaiting payment
activities_pay = [
a for a in activities if a.processing_status == PAY_STATUS
]
# only fees with adjustments or additional fee.
activities_adj = [
a for a in activities_pay
# if a.has_adjusted_application_fee
if a.has_payable_fees_at_issue
or a.has_adjusted_licence_fee
or a.has_additional_fee
]
# only fees which are greater than zero.
for activity in activities_adj:
# Check if refund is required and can be included.
clear_inv = LicenceFeeClearingInvoice(instance)
paid_purposes = [
p for p in activity.proposed_purposes.all()
if p.is_payable
]
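# Build a ledger product line for each payable component of the purpose:
# application fee, licence fee and any additional fee, with a refund line
# appended when a clearing invoice is refundable.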
for p in paid_purposes:
oracle_code = p.purpose.oracle_account_code
fee = p.get_payable_application_fee()
if fee > 0:
price_excl = calculate_excl_gst(fee)
if ApplicationFeePolicy.GST_FREE:
price_excl = fee
product_lines.append(
{
'ledger_description': '{} {}'.format(
p.purpose.name,
'(Application Fee)'
),
'quantity': 1,
'price_incl_tax': str(fee),
'price_excl_tax': str(price_excl),
'oracle_code': oracle_code
}
)
fee = p.get_payable_licence_fee()
if fee > 0:
price_excl = calculate_excl_gst(fee)
if ApplicationFeePolicy.GST_FREE:
price_excl = fee
product_lines.append(
{
'ledger_description': '{} {}'.format(
p.purpose.name,
'(Licence Fee)'
),
'quantity': 1,
'price_incl_tax': str(fee),
'price_excl_tax': str(price_excl),
'oracle_code': oracle_code
}
)
fee = p.additional_fee
if fee > 0:
price_excl = calculate_excl_gst(fee)
if ApplicationFeePolicy.GST_FREE:
price_excl = fee
product_lines.append(
{
'ledger_description': '{}'.format(
p.additional_fee_text,
),
'quantity': 1,
'price_incl_tax': str(fee),
'price_excl_tax': str(price_excl),
'oracle_code': oracle_code
}
)
if clear_inv.is_refundable:
product_lines.append(
clear_inv.get_product_line_refund_for(p)
)
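# Hand the assembled product lines to checkout; return_url sends the user
# back to the external licence fee success page.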
checkout_result = checkout(
request, instance,
lines=product_lines,
invoice_text=application_submission,
add_checkout_params={
'return_url': request.build_absolute_uri(
reverse('external-licence-fee-success-invoice'))
},
)
return checkout_result
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def accept_id_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.accept_id_check(request)
return Response(
{'id_check_status': instance.id_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def reset_id_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.reset_id_check(request)
return Response(
{'id_check_status': instance.id_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def request_id_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.request_id_check(request)
return Response(
{'id_check_status': instance.id_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def get_activities(self, request, *args, **kwargs):
try:
instance = self.get_object()
if is_internal(request):
serializer = DTInternalApplicationSelectedActivitySerializer(
instance.activities, many=True)
if is_customer(request):
serializer = DTExternalApplicationSelectedActivitySerializer(
instance.activities, many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def accept_character_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.accept_character_check(request)
return Response(
{'character_check_status': instance.character_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def reset_character_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.reset_character_check(request)
return Response(
{'character_check_status': instance.character_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def accept_return_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.accept_return_check(request)
return Response(
{'return_check_status': instance.return_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def reset_return_check(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.reset_return_check(request)
return Response(
{'return_check_status': instance.return_check_status},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def last_current_activity(self, request, *args, **kwargs):
'''
NOTE: retrieval of the last current activity is only utilised in the
reissuing process; the queryset is filtered on that decision action.
'''
instance = self.get_object()
user = request.user
if user not in instance.licence_officers:
raise serializers.ValidationError(
'You are not authorised for this application.')
if not instance:
return Response({'activity': None})
current = ApplicationSelectedActivity.ACTIVITY_STATUS_CURRENT
last_activity = instance.get_current_activity_chain(
activity_status=current,
decision_action='reissue'
).first()
if not last_activity:
return Response({'activity': None})
serializer = ApplicationSelectedActivitySerializer(
last_activity, context={'request': request})
return Response({'activity': serializer.data})
@detail_route(methods=['POST', ])
def assign_to_me(self, request, *args, **kwargs):
try:
instance = self.get_object()
user = request.user
if user not in instance.licence_officers:
raise serializers.ValidationError(
'You are not in any relevant licence officer groups for this application.')
instance.assign_officer(request, request.user)
return Response(
{'assigned_officer_id': user.id},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def assign_officer(self, request, *args, **kwargs):
try:
instance = self.get_object()
user_id = request.data.get('officer_id', None)
user = None
if not user_id:
raise serializers.ValidationError('An officer id is required')
try:
user = EmailUser.objects.get(id=user_id)
except EmailUser.DoesNotExist:
raise serializers.ValidationError(
'A user with the id passed in does not exist')
if not request.user.has_perm('wildlifecompliance.licensing_officer'):
raise serializers.ValidationError(
'You are not authorised to assign officers to applications')
if user not in instance.licence_officers:
raise serializers.ValidationError(
'User is not in any relevant licence officer groups for this application')
instance.assign_officer(request, user)
return Response(
{'assigned_officer_id': user.id},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def unassign_officer(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.unassign_officer(request)
return Response(
{'assigned_officer_id': None},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def make_me_activity_approver(self, request, *args, **kwargs):
try:
activity_id = request.data.get('activity_id', None)
instance = self.get_object()
me = request.user
if me not in instance.licence_approvers:
raise serializers.ValidationError(
'You are not in any relevant licence approver '
'groups for this application.')
instance.set_activity_approver(activity_id, me)
return Response(
{'assigned_approver_id': me.id},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def assign_activity_approver(self, request, *args, **kwargs):
try:
instance = self.get_object()
activity_id = request.data.get('activity_id', None)
approver_id = request.data.get('approver_id', None)
approver = None
if not approver_id:
raise serializers.ValidationError('Could not Assign Approver.')
try:
approver = EmailUser.objects.get(id=approver_id)
except EmailUser.DoesNotExist:
raise serializers.ValidationError(
'A user with the id passed in does not exist.')
if not request.user.has_perm('wildlifecompliance.issuing_officer'):
raise serializers.ValidationError(
'You are not authorised to assign approvers '
'for application activity.')
if approver not in instance.licence_approvers:
raise serializers.ValidationError(
'User is not in any relevant licence approver '
'groups for application activity.')
instance.set_activity_approver(activity_id, approver)
return Response(
{'assigned_approver_id': approver.id},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def unassign_activity_approver(self, request, *args, **kwargs):
try:
instance = self.get_object()
activity_id = request.data.get('activity_id', None)
instance.set_activity_approver(activity_id, None)
return Response(
{'assigned_approver_id': None},
status=status.HTTP_200_OK
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def return_to_officer(self, request, *args, **kwargs):
try:
instance = self.get_object()
activity_id = request.data.get('activity_id')
if not activity_id:
raise serializers.ValidationError(
'Activity ID is required!')
instance.return_to_officer_conditions(request, activity_id)
serializer = InternalApplicationSerializer(
instance, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
# raise serializers.ValidationError(repr(e[0].encode('utf-8')))
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def update_licence_type_data(self, request, *args, **kwargs):
'''
Update the Licence Type Data on the application to set the status for
a selected Licence Activity.
NOTE: there is no check that the user has the correct privileges.
'''
PROCESS = 'process'
ASSESS = 'assess'
try:
instance = self.get_object()
licence_activity_id = request.data.get('licence_activity_id', None)
workflow = request.data.get('licence_activity_workflow', None)
if not workflow or not licence_activity_id:
raise serializers.ValidationError(
'Activity workflow and activity id are required')
if workflow.lower() == PROCESS:
instance.set_activity_processing_status(
licence_activity_id,
ApplicationSelectedActivity.PROCESSING_STATUS_WITH_OFFICER,
)
elif workflow.lower() == ASSESS:
instance.set_activity_processing_status(
licence_activity_id,
ApplicationSelectedActivity.PROCESSING_STATUS_OFFICER_CONDITIONS,
)
serializer = InternalApplicationSerializer(
instance,
context={'request': request}
)
response = Response(serializer.data)
return response
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def complete_assessment(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.complete_assessment(request)
serializer = InternalApplicationSerializer(
instance, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def proposed_licence(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = ProposedLicenceSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance.proposed_licence(request, serializer.validated_data)
return Response({'success': True})
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def get_proposed_decisions(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.get_proposed_decisions(request)
serializer = ApplicationProposedIssueSerializer(qs, many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def assessment_data(self, request, *args, **kwargs):
'''
Process assessment data for officer management by setting the workflow
status to Officer with Conditions.
NOTE: there is no check that the user has the correct privileges.
:param __assess: boolean indicating whether assessing or viewing.
:param __licence_activity: the Licence Activity identifier.
:return: the updated instance.licence_type_data property.
'''
logger.debug('assessment_data()')
STAT = ApplicationSelectedActivity.PROCESSING_STATUS_OFFICER_CONDITIONS
correct_status = [
ApplicationSelectedActivity.PROCESSING_STATUS_WITH_OFFICER,
]
try:
instance = self.get_object()
assess = request.data.pop('__assess', False)
licence_activity_id = request.data.pop('__licence_activity', None)
with transaction.atomic():
is_initial_assess = instance.get_property_cache_assess()
if assess or is_initial_assess:
checkbox = CheckboxAndRadioButtonVisitor(
instance, request.data
)
# Set StandardCondition Fields.
for_condition_fields = StandardConditionFieldElement()
for_condition_fields.accept(checkbox)
# Set PromptInspection Fields.
for_inspection_fields = PromptInspectionFieldElement()
for_inspection_fields.accept(checkbox)
if is_initial_assess:
instance.set_property_cache_assess(False)
instance.save()
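# Only move the activity on to officer-conditions when it is currently
# sitting with the officer (see correct_status above).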
selected_activity = instance.get_selected_activity(
licence_activity_id
)
if selected_activity.processing_status in correct_status:
instance.set_activity_processing_status(
licence_activity_id,
STAT,
)
logger.debug('assessment_data() - response success')
serializer = InternalApplicationSerializer(
instance,
context={'request': request}
)
response = Response(serializer.data)
return response
except MissingFieldsException as e:
return Response({
'missing': e.error_list},
status=status.HTTP_400_BAD_REQUEST
)
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def final_decision_data(self, request, *args, **kwargs):
try:
instance = self.get_object()
with transaction.atomic():
checkbox = CheckboxAndRadioButtonVisitor(
instance, request.data
)
# Set species Fields for Checkbox and RadioButtons.
# save on purpose approval.
for_species_options_fields = SpeciesOptionsFieldElement()
for_species_options_fields.accept(checkbox)
return Response({'success': True})
except MissingFieldsException as e:
return Response({
'missing': e.error_list},
status=status.HTTP_400_BAD_REQUEST
)
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def final_decision(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = IssueLicenceSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
with transaction.atomic():
instance.final_decision(request)
return Response({'success': True})
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def proposed_decline(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = ProposedDeclineSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance.proposed_decline(request, serializer.validated_data)
serializer = InternalApplicationSerializer(
instance, context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def draft(self, request, *args, **kwargs):
parser = SchemaParser(draft=True)
try:
instance = self.get_object()
parser.save_application_user_data(instance, request, self)
return redirect(reverse('external'))
except MissingFieldsException as e:
return Response({
'missing': e.error_list},
status=status.HTTP_400_BAD_REQUEST
)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def officer_comments(self, request, *args, **kwargs):
try:
instance = self.get_object()
with transaction.atomic():
ApplicationService.process_form(
request,
instance,
request.data,
action=ApplicationFormDataRecord.ACTION_TYPE_ASSIGN_COMMENT
)
return Response({'success': True})
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def form_data(self, request, *args, **kwargs):
logger.debug('form_data()')
try:
instance = self.get_object()
is_submit = self.request.data.pop('__submit', False)
if is_submit:
action = ApplicationFormDataRecord.ACTION_TYPE_ASSIGN_SUBMIT
else:
action = ApplicationFormDataRecord.ACTION_TYPE_ASSIGN_VALUE
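# The __submit flag distinguishes a final submission save from an
# in-progress save of field values.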
with transaction.atomic():
ApplicationService.process_form(
request,
instance,
request.data,
action=action
)
instance.log_user_action(
ApplicationUserAction.ACTION_SAVE_APPLICATION.format(
instance.lodgement_number
), request)
logger.debug('form_data() - successful response')
return Response({'success': True})
except MissingFieldsException as e:
return Response({
'missing': e.error_list},
status=status.HTTP_400_BAD_REQUEST
)
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['get'])
def select_filtered_species(self, request, *args, **kwargs):
try:
instance = self.get_object()
category = instance.licence_category
filter_str = request.query_params['term']
tsc_service = TSCSpecieService(HerbieSpecieKMICall())
data = tsc_service.search_filtered_taxon(filter_str, category)
return Response(data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['post'])
@renderer_classes((JSONRenderer,))
def application_officer_save(self, request, *args, **kwargs):
try:
instance = self.get_object()
parser = SchemaParser()
parser.save_application_officer_data(instance, request, self)
return redirect(reverse('external'))
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@renderer_classes((JSONRenderer,))
def create(self, request, *args, **kwargs):
from wildlifecompliance.components.licences.models import (
WildlifeLicence, LicencePurpose
)
from wildlifecompliance.components.applications.payments import (
ApplicationFeePolicy,
)
try:
org_applicant = request.data.get('organisation_id')
proxy_applicant = request.data.get('proxy_id')
licence_purposes = request.data.get('licence_purposes')
application_type = request.data.get('application_type')
customer_pay_method = request.data.get('customer_method_id')
# Amendment of a licence purpose requires the selected activity it belongs
# to, which allows for multiple purposes of the same type.
selected_activity = request.data.get('selected_activity', None)
selected_purpose = request.data.get('selected_purpose', None)
# establish the submit type from the payment method.
CASH = ApplicationInvoice.OTHER_PAYMENT_METHOD_CASH
NONE = ApplicationInvoice.OTHER_PAYMENT_METHOD_NONE
if customer_pay_method == CASH:
submit_type = Application.SUBMIT_TYPE_PAPER
elif customer_pay_method == NONE:
submit_type = Application.SUBMIT_TYPE_MIGRATE
else:
submit_type = Application.SUBMIT_TYPE_ONLINE
data = {
'submitter': request.user.id,
'org_applicant': org_applicant,
'proxy_applicant': proxy_applicant,
'licence_purposes': licence_purposes,
'application_type': application_type,
'submit_type': submit_type,
}
if not licence_purposes:
raise serializers.ValidationError(
'Please select at least one purpose')
with transaction.atomic():
licence_purposes_queryset = LicencePurpose.objects.filter(
id__in=licence_purposes
)
licence_category = licence_purposes_queryset.first().licence_category
licence_activities = Application.get_active_licence_activities(
request, application_type)
licence_activity_ids = Application.get_active_licence_activities(
request, application_type).values_list('licence_activity_id', flat=True)
# active_applications are applications linked with licences that have CURRENT or SUSPENDED activities
active_applications = Application.get_active_licence_applications(request, application_type) \
.filter(licence_purposes__licence_category_id=licence_category.id) \
.order_by('-id')
active_current_applications = active_applications.exclude(
selected_activities__activity_status=ApplicationSelectedActivity.ACTIVITY_STATUS_SUSPENDED
)
# Determine the licence number from the active applications for this category.
latest_active_licence = WildlifeLicence.objects.filter(
licence_category_id=licence_category.id,
id__in=active_applications.values_list('licence_id', flat=True)
).order_by('-id').first()
# Initial validation
if application_type in [
Application.APPLICATION_TYPE_AMENDMENT,
Application.APPLICATION_TYPE_RENEWAL,
Application.APPLICATION_TYPE_REISSUE,
]:
# Check an Application Selected Activity has been chosen.
if not (selected_activity and selected_purpose):
raise serializers.ValidationError(
'Cannot create application: licence not found!'
)
# Check that at least one active application exists in this
# licence category for amendment/renewal.
if not latest_active_licence:
raise serializers.ValidationError(
'Cannot create amendment application: active licence not found!')
# Ensure purpose ids are in a shared set with the latest current
# application's purposes to prevent front-end tampering. Remove any
# that aren't valid for renewal/amendment/reissue.
active_current_purposes = active_current_applications.filter(
licence_purposes__licence_activity_id__in=licence_activity_ids,
licence_purposes__id__in=licence_purposes,
).values_list(
'licence_purposes__id',
flat=True
)
# Set the previous application for these application types. Although
# multiple purposes of the same type can exist for a licence, only one
# can be created for the selected activity.
previous_application = licence_activities.filter(
id=int(selected_activity)
).values_list(
'application_id',
flat=True
).first()
data['previous_application'] = previous_application
# cleaned_purpose_ids = set(active_current_purposes) & set(licence_purposes)
# Set to the latest licence purpose version in queryset.
amendable_purposes_qs = licence_purposes_queryset
cleaned_purposes = [
p.get_latest_version() for p in amendable_purposes_qs
if p.id in active_current_purposes
]
cleaned_purpose_ids = [p.id for p in cleaned_purposes]
# cleaned_purpose_ids = []
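# Purposes not held on the applicant's current licence are silently dropped
# here rather than raising a validation error.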
data['licence_purposes'] = cleaned_purpose_ids
if latest_active_licence:
# Store currently active licence against application.
data['licence'] = latest_active_licence.id
# Use serializer for external application creation - do not
# expose unneeded fields.
serializer = CreateExternalApplicationSerializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
# Pre-fill the Application Form and Conditions with data from
# current Application Selected Activity (selected_activity).
# NOTE: Only selected purpose can be amended or renewed.
if application_type in [
Application.APPLICATION_TYPE_AMENDMENT,
Application.APPLICATION_TYPE_RENEWAL,
]:
target_application = serializer.instance
copied_purpose_ids = []
activity = licence_activities.filter(
id=int(selected_activity)).first()
selected_purpose = activity.proposed_purposes.filter(
id=int(selected_purpose)).first()
activity.application.copy_application_purpose_to_target_application(
target_application,
selected_purpose.purpose_id,
)
activity.application.copy_conditions_to_target(
target_application,
selected_purpose.purpose_id,
)
# When Licence Purpose has been replaced update target with
# the latest version using the selected_purpose from the
# accepted application.
licence_version_updated = \
target_application.update_application_purpose_version(
selected_purpose,
)
if licence_version_updated:
action = ApplicationUserAction.ACTION_VERSION_LICENCE_
target_application.log_user_action(
action.format(
selected_purpose.purpose.short_name,
selected_purpose.purpose.version,
),
request
)
# Set previous_application to the latest active application if
# exists
if not serializer.instance.previous_application \
and latest_active_licence:
serializer.instance.previous_application_id =\
latest_active_licence.current_application.id
serializer.instance.save()
# serializer.instance.update_dynamic_attributes()
ApplicationService.update_dynamic_attributes(
serializer.instance)
# Use fee policy to set initial base fee for the application.
policy = \
ApplicationFeePolicy.get_fee_policy_for(serializer.instance)
policy.set_base_application_fee_for(serializer.instance)
response = Response(serializer.data)
return response
except Exception as e:
logger.error('ApplicationViewSet.create() {}'.format(e))
traceback.print_exc()
raise serializers.ValidationError(str(e))
def update(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = SaveApplicationSerializer(instance, data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return Response(serializer.data)
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
def destroy(self, request, *args, **kwargs):
http_status = status.HTTP_200_OK
instance = self.get_object()
if instance.processing_status != Application.PROCESSING_STATUS_DRAFT:
raise serializers.ValidationError(
'You cannot discard a submitted application!')
instance.activities.filter(
processing_status=ApplicationSelectedActivity.PROCESSING_STATUS_DRAFT
).update(
processing_status=ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED
)
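# Only draft activities are flagged as discarded; the application record
# itself is retained rather than deleted.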
return Response(
{'processing_status': ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED},
status=http_status)
@detail_route(methods=['DELETE', ])
def discard_activity(self, request, *args, **kwargs):
http_status = status.HTTP_200_OK
activity_id = request.GET.get('activity_id')
instance = self.get_object()
try:
activity = instance.activities.get(
licence_activity_id=activity_id,
processing_status=ApplicationSelectedActivity.PROCESSING_STATUS_DRAFT
)
except ApplicationSelectedActivity.DoesNotExist:
raise serializers.ValidationError("This activity cannot be discarded at this time.")
activity.processing_status = ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED
activity.save()
return Response({'processing_status': instance.processing_status}, status=http_status)
@detail_route(methods=['GET', ])
def assessment_details(self, request, *args, **kwargs):
# queryset = self.get_queryset()
instance = self.get_object()
queryset = Assessment.objects.filter(application=instance.id)
licence_activity = self.request.query_params.get(
'licence_activity', None)
if licence_activity is not None:
queryset = queryset.filter(
licence_activity=licence_activity)
serializer = AssessmentSerializer(queryset, many=True)
return Response(serializer.data)
@list_route(methods=['POST', ])
def set_application_species(self, request, *args, **kwargs):
species_ids = request.data.get('field_data')
if species_ids is not None:
species_list = ApplicationService.get_licence_species(species_ids)
return Response({'species': species_list })
return Response({
'species': None
})
class ApplicationConditionViewSet(viewsets.ModelViewSet):
queryset = ApplicationCondition.objects.all()
serializer_class = ApplicationConditionSerializer
def get_queryset(self):
user = self.request.user
if is_internal(self.request):
return ApplicationCondition.objects.all()
elif is_customer(self.request):
user_orgs = [
org.id for org in user.wildlifecompliance_organisations.all()]
user_applications = [application.id for application in Application.objects.filter(
Q(org_applicant_id__in=user_orgs) | Q(proxy_applicant=user) | Q(submitter=user))]
return ApplicationCondition.objects.filter(
Q(application_id__in=user_applications))
return ApplicationCondition.objects.none()
@detail_route(methods=['DELETE', ])
def delete(self, request, *args, **kwargs):
from wildlifecompliance.components.returns.services import ReturnService
try:
instance = self.get_object()
with transaction.atomic():
ReturnService.discard_return_request(request, instance)
instance.application.log_user_action(
ApplicationUserAction.ACTION_DELETE_CONDITION.format(
instance.licence_purpose.short_name,
instance.condition[:256],
),
request
)
instance.delete()
serializer = self.get_serializer(instance)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def update_condition(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data)
serializer.is_valid(raise_exception=True)
with transaction.atomic():
instance = serializer.save()
instance.application.log_user_action(
ApplicationUserAction.ACTION_UPDATE_CONDITION.format(
instance.licence_purpose.short_name,
instance.condition[:256],
),
request
)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
def create(self, request, *args, **kwargs):
try:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
with transaction.atomic():
instance = serializer.save()
instance.set_source(request.user)
instance.submit()
instance.application.log_user_action(
ApplicationUserAction.ACTION_CREATE_CONDITION.format(
instance.licence_purpose.short_name,
instance.condition[:256],
),
request
)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def move_up(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.up()
instance.save()
instance.application.log_user_action(
ApplicationUserAction.ACTION_ORDER_CONDITION_UP.format(
instance.condition[:256]), request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def move_down(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.down()
instance.save()
instance.application.log_user_action(
ApplicationUserAction.ACTION_ORDER_CONDITION_DOWN.format(
instance.condition[:256]), request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
class ApplicationSelectedActivityViewSet(viewsets.ModelViewSet):
queryset = ApplicationSelectedActivity.objects.all()
serializer_class = ApplicationSelectedActivitySerializer
def get_queryset(self):
if is_internal(self.request):
return ApplicationSelectedActivity.objects.all()
elif is_customer(self.request):
return ApplicationSelectedActivity.objects.none()
return ApplicationSelectedActivity.objects.none()
@detail_route(methods=['POST', ])
def process_issuance_document(self, request, *args, **kwargs):
try:
instance = self.get_object()
returned_data = process_generic_document(request, instance, document_type="issuance_documents")
if returned_data:
return Response(returned_data)
else:
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
class ApplicationStandardConditionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = ApplicationStandardCondition.objects.all()
serializer_class = ApplicationStandardConditionSerializer
def get_queryset(self):
if is_internal(self.request):
return ApplicationStandardCondition.objects.all()
elif is_customer(self.request):
return ApplicationStandardCondition.objects.none()
return ApplicationStandardCondition.objects.none()
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
search = request.GET.get('search')
if search:
queryset = queryset.filter(text__icontains=search)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class AssessmentPaginatedViewSet(viewsets.ModelViewSet):
filter_backends = (ApplicationFilterBackend,)
pagination_class = DatatablesPageNumberPagination
renderer_classes = (ApplicationRenderer,)
queryset = Assessment.objects.none()
serializer_class = DTAssessmentSerializer
page_size = 10
def get_queryset(self):
if is_internal(self.request):
return Assessment.objects.all()
elif is_customer(self.request):
return Assessment.objects.none()
return Assessment.objects.none()
@list_route(methods=['GET', ])
def datatable_list(self, request, *args, **kwargs):
self.serializer_class = DTAssessmentSerializer
# Get the assessor groups the current user is member of
perm_user = PermissionUser(request.user)
assessor_groups = perm_user.get_wildlifelicence_permission_group(
'assessor', first=False)
        # For each assessor group, get its assessments
queryset = self.get_queryset().none()
for group in assessor_groups:
queryset = queryset | Assessment.objects.filter(
assessor_group=group) | Assessment.objects.filter(
actioned_by=self.request.user)
queryset = self.filter_queryset(queryset)
self.paginator.page_size = queryset.count()
result_page = self.paginator.paginate_queryset(queryset, request)
serializer = DTAssessmentSerializer(
result_page, context={'request': request}, many=True)
return self.paginator.get_paginated_response(serializer.data)
class AssessmentViewSet(viewsets.ModelViewSet):
queryset = Assessment.objects.all()
serializer_class = AssessmentSerializer
def get_queryset(self):
if is_internal(self.request):
return Assessment.objects.all()
elif is_customer(self.request):
return Assessment.objects.none()
return Assessment.objects.none()
@list_route(methods=['GET', ])
def get_latest_for_application_activity(self, request, *args, **kwargs):
application_id = request.query_params.get(
'application_id', None)
activity_id = request.query_params.get(
'activity_id', None)
latest_assessment = Assessment.objects.filter(
application_id=application_id,
licence_activity_id=activity_id
).exclude(
status='recalled'
).latest('id')
serializer = AssessmentSerializer(latest_assessment)
return Response(serializer.data)
@list_route(methods=['GET', ])
def user_list(self, request, *args, **kwargs):
        # Get the assessor groups the current user is a member of
perm_user = PermissionUser(request.user)
assessor_groups = perm_user.get_wildlifelicence_permission_group('assessor', first=False)
        # For each assessor group, get its assessments
queryset = self.get_queryset().none()
for group in assessor_groups:
queryset = queryset | Assessment.objects.filter(
assessor_group=group)
serializer = DTAssessmentSerializer(queryset, many=True)
return Response(serializer.data)
@renderer_classes((JSONRenderer,))
def create(self, request, *args, **kwargs):
try:
serializer = SaveAssessmentSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance = serializer.save()
instance.generate_assessment(request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            if hasattr(e, 'error_dict'):
                raise serializers.ValidationError(repr(e.error_dict))
            else:
                logger.error('AssessmentViewSet.create(): {0}'.format(e))
                raise serializers.ValidationError(repr(e))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def remind_assessment(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.remind_assessment(request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            traceback.print_exc()
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def recall_assessment(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.recall_assessment(request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            traceback.print_exc()
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
@detail_route(methods=['POST', ])
def resend_assessment(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.resend_assessment(request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            traceback.print_exc()
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
@detail_route(methods=['PUT', ])
def update_assessment(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = SimpleSaveAssessmentSerializer(instance, data=self.request.data, partial=True)
serializer.is_valid(raise_exception=True)
instance = serializer.save()
return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            if hasattr(e, 'error_dict'):
                raise serializers.ValidationError(repr(e.error_dict))
            else:
                logger.error(
                    'AssessmentViewSet.update_assessment(): {0}'.format(e)
                )
                raise serializers.ValidationError(repr(e))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
class AssessorGroupViewSet(viewsets.ModelViewSet):
queryset = ActivityPermissionGroup.objects.none()
serializer_class = ActivityPermissionGroupSerializer
renderer_classes = [JSONRenderer, ]
def get_queryset(self, application=None):
if is_internal(self.request):
if application is not None:
return application.get_permission_groups('assessor')
return ActivityPermissionGroup.objects.filter(
permissions__codename='assessor'
)
elif is_customer(self.request):
return ActivityPermissionGroup.objects.none()
return ActivityPermissionGroup.objects.none()
@list_route(methods=['POST', ])
def user_list(self, request, *args, **kwargs):
app_id = request.data.get('application_id')
application = Application.objects.get(id=app_id)
queryset = self.get_queryset(application)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class AmendmentRequestViewSet(viewsets.ModelViewSet):
queryset = AmendmentRequest.objects.all()
serializer_class = AmendmentRequestSerializer
def get_queryset(self):
user = self.request.user
if is_internal(self.request):
return AmendmentRequest.objects.all()
elif is_customer(self.request):
user_orgs = [
org.id for org in user.wildlifecompliance_organisations.all()]
user_applications = [application.id for application in Application.objects.filter(
Q(org_applicant_id__in=user_orgs) | Q(proxy_applicant=user) | Q(submitter=user))]
return AmendmentRequest.objects.filter(
Q(application_id__in=user_applications))
return AmendmentRequest.objects.none()
def create(self, request, *args, **kwargs):
try:
amend_data = self.request.data
reason = amend_data.pop('reason')
application_id = amend_data.pop('application')
text = amend_data.pop('text')
activity_list = amend_data.pop('activity_list')
if not activity_list:
raise serializers.ValidationError('Please select at least one activity to amend!')
data = {}
application = Application.objects.get(id=application_id)
for activity_id in activity_list:
data = {
'application': application_id,
'reason': reason,
'text': text,
'licence_activity': activity_id
}
selected_activity = application.get_selected_activity(activity_id)
if selected_activity.processing_status == ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED:
raise serializers.ValidationError('Selected activity has been discarded by the customer!')
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
instance = serializer.save()
instance.reason = reason
instance.generate_amendment(request)
# Set all proposed purposes back to selected.
STATUS = \
ApplicationSelectedActivityPurpose.PROCESSING_STATUS_SELECTED
                p_ids = [
                    p.purpose.id
                    for p in selected_activity.proposed_purposes.all()
                ]
selected_activity.set_proposed_purposes_process_status_for(
p_ids, STATUS)
# send email
send_application_amendment_notification(
data, application, request)
serializer = self.get_serializer(instance)
return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            if hasattr(e, 'error_dict'):
                raise serializers.ValidationError(repr(e.error_dict))
            else:
                logger.error(
                    'AmendmentRequestViewSet.create(): {0}'.format(e)
                )
                raise serializers.ValidationError(repr(e))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
class AmendmentRequestReasonChoicesView(views.APIView):
renderer_classes = [JSONRenderer, ]
def get(self, request, format=None):
choices_list = []
choices = AmendmentRequest.REASON_CHOICES
if choices:
for c in choices:
choices_list.append({'key': c[0], 'value': c[1]})
return Response(choices_list)
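The viewsets defined above only become reachable once they are registered against URL routes; that wiring lives elsewhere in the project and is not part of this file. As a hedged illustration only, the sketch below shows how viewsets of this shape are typically exposed through a DRF router; the URL prefixes and the module layout are assumptions, not the project's actual configuration.

from rest_framework import routers

# Hypothetical router wiring; prefixes are illustrative, not taken from the project.
router = routers.DefaultRouter()
router.register(r'assessments', AssessmentViewSet)
router.register(r'assessments_paginated', AssessmentPaginatedViewSet)
router.register(r'assessor_groups', AssessorGroupViewSet)
router.register(r'amendment_requests', AmendmentRequestViewSet)

urlpatterns = router.urls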
| 41.095439
| 117
| 0.599414
|
75c358f5a494fbfd6d87a789a431aa2633edd326
| 1,413
|
py
|
Python
|
bakery/management/commands/unpublish.py
|
NDevox/django-bakery
|
bc012735a54d6bddabd1264094acb138effb248d
|
[
"MIT"
] | 314
|
2015-01-14T20:00:09.000Z
|
2021-08-12T15:06:03.000Z
|
bakery/management/commands/unpublish.py
|
NDevox/django-bakery
|
bc012735a54d6bddabd1264094acb138effb248d
|
[
"MIT"
] | 94
|
2015-01-02T16:12:53.000Z
|
2021-06-10T20:32:01.000Z
|
bakery/management/commands/unpublish.py
|
NDevox/django-bakery
|
bc012735a54d6bddabd1264094acb138effb248d
|
[
"MIT"
] | 61
|
2015-01-12T17:03:47.000Z
|
2021-05-27T11:12:42.000Z
|
import logging
from django.conf import settings
from django.core.management.base import CommandError
from bakery.management.commands import BasePublishCommand
logger = logging.getLogger(__name__)
class Command(BasePublishCommand):
help = "Empties the Amazon S3 bucket defined in settings.py"
bucket_unconfig_msg = "Bucket unconfigured. Set AWS_BUCKET_NAME in settings.py or provide it with --aws-bucket-name"
def add_arguments(self, parser):
parser.add_argument(
"--aws-bucket-name",
action="store",
dest="aws_bucket_name",
default='',
help="Specify the AWS bucket to sync with. Will use settings.AWS_BUCKET_NAME by default."
)
def handle(self, *args, **options):
if options.get("aws_bucket_name"):
aws_bucket_name = options.get("aws_bucket_name")
else:
            # Otherwise try to find it in the settings
if not hasattr(settings, 'AWS_BUCKET_NAME'):
raise CommandError(self.bucket_unconfig_msg)
aws_bucket_name = settings.AWS_BUCKET_NAME
# Pull all the keys from the bucket
all_objects = self.get_all_objects_in_bucket(aws_bucket_name)
keys = all_objects.keys()
self.batch_delete_s3_objects(keys, aws_bucket_name)
# A little logging
logger.info("unpublish completed, %d deleted files" % len(keys))
| 38.189189
| 120
| 0.675867
|
fc40136414af9d340cb7038c28e5ad6d1a3f7136
| 1,601
|
py
|
Python
|
sdoh/process.py
|
uw-bionlp/uwbionlp-parser
|
2216b25521438d7ca53a6b717e5b2ed94731cf88
|
[
"BSD-3-Clause"
] | 2
|
2021-02-24T21:32:26.000Z
|
2021-05-17T02:27:23.000Z
|
sdoh/process.py
|
uw-bionlp/uwbionlp-parser
|
2216b25521438d7ca53a6b717e5b2ed94731cf88
|
[
"BSD-3-Clause"
] | null | null | null |
sdoh/process.py
|
uw-bionlp/uwbionlp-parser
|
2216b25521438d7ca53a6b717e5b2ed94731cf88
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import json
from docker.extractor import Extractor
from proto.python.uwbionlp_pb2 import PredictionOutput, PredictionEvent, PredictionEventArgument
class DocumentProcessor():
def __init__(self):
self.model_dir = os.path.join('model','sdoh')
self.word_embed_dir = os.path.join('model','word2vec')
self.extractor = Extractor('sdoh', self.model_dir, self.word_embed_dir)
def predict(self, text, device=-1):
if not len(text.strip()):
prediction = []
else:
prediction = self.extractor.predict(text, device, 'json')
# Return Protobuf Result object.
result = PredictionOutput()
for ev in prediction:
pred_ev = PredictionEvent()
pred_ev.type = val_else_empty_str(ev['type'])
for arg in ev['arguments']:
pred_arg = PredictionEventArgument()
pred_arg.char_start_idx = arg['indices'][0] if arg['indices'] else -1
pred_arg.char_end_idx = arg['indices'][1] if arg['indices'] else -1
pred_arg.label = val_else_empty_str(arg['label'])
pred_arg.text = val_else_empty_str(arg['text'])
pred_arg.type = val_else_empty_str(arg['type'])
pred_ev.arguments.append(pred_arg)
result.predictions.append(pred_ev)
return result
def val_else_empty_list(val):
if val: return val
return []
def val_else_empty_str(val):
if val: return val
return ''
def val_else_default_int(val):
    if val is not None: return val
return -1
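A minimal usage sketch for the processor above, assuming the model and word2vec directories referenced in __init__ exist on disk and that Extractor.predict behaves as shown; the input sentence is illustrative only.

processor = DocumentProcessor()
result = processor.predict("Patient reports drinking on weekends.", device=-1)
for event in result.predictions:
    # Each PredictionEvent carries a type plus labelled argument spans.
    print(event.type, [(arg.label, arg.text) for arg in event.arguments])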
| 34.06383
| 96
| 0.625234
|
927ce323178c606eaa0cf02567deca7bfd2932a6
| 1,105
|
py
|
Python
|
vispy/gloo/tests/test_globject.py
|
hmaarrfk/vispy
|
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
|
[
"BSD-3-Clause"
] | 3
|
2019-02-28T16:05:33.000Z
|
2020-05-03T21:29:03.000Z
|
vispy/gloo/tests/test_globject.py
|
hmaarrfk/vispy
|
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
|
[
"BSD-3-Clause"
] | 19
|
2015-06-16T14:33:22.000Z
|
2015-07-27T21:18:15.000Z
|
graphViz/vispy/gloo/tests/test_globject.py
|
onecklam/ethereum-graphviz
|
6993accf0cb85e23013bf7ae6b04145724a6dbd2
|
[
"Apache-2.0"
] | 1
|
2019-04-03T12:49:18.000Z
|
2019-04-03T12:49:18.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
from vispy.testing import run_tests_if_main
from vispy.gloo.globject import GLObject
def test_globject():
""" Test gl object uinique id and GLIR CREATE command """
objects = [GLObject() for i in range(10)]
ids = [ob.id for ob in objects]
# Verify that each id is unique (test should not care how)
assert len(set(ids)) == len(objects)
# Verify that glir commands have been created
commands = []
for ob in objects:
commands.extend(ob._glir.clear())
assert len(commands) == len(objects)
for cmd in commands:
assert cmd[0] == 'CREATE'
# Delete
ob = objects[-1]
    q = ob._glir  # get it now, because it's gone after we delete it
ob.delete()
cmd = q.clear()[-1]
assert cmd[0] == 'DELETE'
run_tests_if_main()
| 29.864865
| 79
| 0.545701
|