| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40..40 |
| size | int64 | 4..1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4..209 |
| max_stars_repo_name | string | lengths 5..121 |
| max_stars_repo_head_hexsha | string | lengths 40..40 |
| max_stars_repo_licenses | list | lengths 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_path | string | lengths 4..209 |
| max_issues_repo_name | string | lengths 5..121 |
| max_issues_repo_head_hexsha | string | lengths 40..40 |
| max_issues_repo_licenses | list | lengths 1..10 |
| max_issues_count | int64 | 1..67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_path | string | lengths 4..209 |
| max_forks_repo_name | string | lengths 5..121 |
| max_forks_repo_head_hexsha | string | lengths 40..40 |
| max_forks_repo_licenses | list | lengths 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24 ⌀ |
| content | string | lengths 4..1.02M |
| avg_line_length | float64 | 1.07..66.1k |
| max_line_length | int64 | 4..266k |
| alphanum_fraction | float64 | 0.01..1 |

(⌀ marks columns that contain null values.)
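The column layout above follows the Hugging Face dataset-viewer export format. A minimal loading sketch, assuming the dump is published as a datasets-compatible dataset; the dataset name below is a placeholder, not the real identifier:

# Sketch only: assumes the dump is hosted as a Hugging Face dataset.
from datasets import load_dataset

ds = load_dataset("<dataset-name>", split="train", streaming=True)  # placeholder name
row = next(iter(ds))
# Every row carries the columns listed above; "content" holds the raw source file.
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])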
231fd77087bf5102bd7314a9822c5d40c1d988dc | 3,194 | py | Python | pre_processing/minio_args_poster.py | archital/perfgun | f814a40bc50e64879f2cb5fe2312e1f22817ec65 | ["Apache-2.0"] | null | null | null | pre_processing/minio_args_poster.py | archital/perfgun | f814a40bc50e64879f2cb5fe2312e1f22817ec65 | ["Apache-2.0"] | null | null | null | pre_processing/minio_args_poster.py | archital/perfgun | f814a40bc50e64879f2cb5fe2312e1f22817ec65 | ["Apache-2.0"] | null | null | null |
import argparse
import json
import requests
from os import environ
import shutil
DATA_FOR_POST_PROCESSING_FOLDER = "/tmp/data_for_post_processing/"
def get_args():
parser = argparse.ArgumentParser(description='Simlog parser.')
parser.add_argument("-t", "--type", help="Test type.")
parser.add_argument("-s", "--simulation", help='Test simulation', default=None)
parser.add_argument("-b", "--build_id", help="build ID", default=None)
    parser.add_argument("-en", "--env", help="Environment.", default=None)
parser.add_argument("-i", "--influx_host", help='InfluxDB host or IP', default=None)
parser.add_argument("-p", "--influx_port", help='InfluxDB port', default=8086)
parser.add_argument("-iu", "--influx_user", help='InfluxDB user', default="")
parser.add_argument("-ip", "--influx_password", help='InfluxDB password', default="")
parser.add_argument("-cm", "--comparison_metric", help='Comparison metric', default="pct95")
parser.add_argument("-idb", "--influx_db", help='Comparison InfluxDB', default="gatling")
parser.add_argument("-icdb", "--comparison_db", help='Comparison InfluxDB', default="comparison")
parser.add_argument("-itdb", "--thresholds_db", help='Thresholds InfluxDB', default="thresholds")
parser.add_argument("-tl", "--test_limit", help='test_limit', default=5)
parser.add_argument("-l", "--lg_id", help='Load generator ID', default=None)
parser.add_argument("-el", "--error_logs", help='Path to the error logs', default='/tmp/')
parser.add_argument("-trl", "--test_results_log", help='Path to the test results log',
default='/opt/gatling/bin/logs/test_results.log')
return vars(parser.parse_args())
if __name__ == '__main__':
args = get_args()
prefix = environ.get('DISTRIBUTED_MODE_PREFIX')
token = environ.get('token')
if prefix:
PROJECT_ID = environ.get('project_id')
URL = environ.get('galloper_url')
BUCKET = environ.get("results_bucket")
if not all(a for a in [URL, BUCKET]):
exit(0)
# Make archive with data for post processing
with open(DATA_FOR_POST_PROCESSING_FOLDER + "args.json", 'w') as f:
f.write(json.dumps(args))
with open(DATA_FOR_POST_PROCESSING_FOLDER + "aggregated_errors.json", 'w') as f:
f.write(json.dumps({}))
path_to_test_results = "/tmp/" + prefix + "_" + str(args['lg_id'])
shutil.make_archive(path_to_test_results, 'zip', DATA_FOR_POST_PROCESSING_FOLDER)
# Send data to minio
headers = {'Authorization': f'bearer {token}'} if token else {}
if PROJECT_ID:
upload_url = f'{URL}/api/v1/artifacts/{PROJECT_ID}/{BUCKET}/file'
requests.post(f'{URL}/api/v1/artifacts/{PROJECT_ID}/{BUCKET}', allow_redirects=True, headers=headers)
else:
upload_url = f'{URL}/artifacts/{BUCKET}/upload'
requests.post(f'{URL}/artifacts/bucket', allow_redirects=True, data={'bucket': BUCKET}, headers=headers)
        with open(path_to_test_results + ".zip", 'rb') as archive:
            files = {'file': archive}
            requests.post(upload_url, allow_redirects=True, files=files, headers=headers)
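A minimal invocation sketch for the script above; the environment values are illustrative assumptions, not values taken from the perfgun repo:

# Sketch only: DISTRIBUTED_MODE_PREFIX, galloper_url and results_bucket must be set for
# the upload branch to run, and /tmp/data_for_post_processing/ must already exist.
import os
import subprocess

os.environ["DISTRIBUTED_MODE_PREFIX"] = "run_1"
os.environ["galloper_url"] = "http://galloper.example.com"  # assumed endpoint
os.environ["results_bucket"] = "results"                    # assumed bucket name
os.environ["project_id"] = "1"                              # optional: switches to the /api/v1 upload route
os.environ["token"] = "<api-token>"                         # optional bearer token

subprocess.run(
    ["python", "pre_processing/minio_args_poster.py",
     "-t", "gatling", "-s", "MySimulation", "-b", "build-1", "-l", "1"],
    check=True,
)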
| 50.698413 | 116 | 0.663745 |
666820acb34bc9a899f30c99d0f94570e2e71236 | 3,482 | py | Python | Sandbox/.ycm_extra_conf.py | noidawt/AulysEngine | 6e4a6482961fc27e48342d4767f2d075869b1abf | ["MIT"] | 1 | 2020-09-28T10:04:02.000Z | 2020-09-28T10:04:02.000Z | Sandbox/.ycm_extra_conf.py | noidawt/AulysEngine | 6e4a6482961fc27e48342d4767f2d075869b1abf | ["MIT"] | null | null | null | Sandbox/.ycm_extra_conf.py | noidawt/AulysEngine | 6e4a6482961fc27e48342d4767f2d075869b1abf | ["MIT"] | null | null | null |
import os
import ycm_core
from clang_helpers import PrepareClangFlags
def DirectoryOfThisScript():
#return os.path.dirname(os.path.abspath(__file__))
#print(os.path.join(os.path.dirname(os.path.abspath(__file__)),
# "export-compile-commands/debug/"))
return os.path.join(os.path.dirname(os.path.abspath(__file__)), ".export-compile-commands/debug/")
#return os.path.dirname(os.path.join("/llvm_tools/debug/",os.path.abspath(__file__)))
# This is the single most important line in this script. Everything else is just nice to have but
# not strictly necessary.
compilation_database_folder = DirectoryOfThisScript()
# This provides a safe fall-back if no compilation commands are available. You could also add
# includes relative to your project directory, for example.
flags = [
'-Wall',
'-std=c++11',
'-stdlib=libc++',
'-x',
'c++',
'-I',
'.',
'-isystem', '/usr/local/include',
'-isystem', '/usr/include',
'-I.',
]
if compilation_database_folder:
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm', '.h', '.hpp' ]
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
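A small usage sketch, not part of the original file, showing how MakeRelativePathsInFlagsAbsolute rewrites relative include flags; the working directory is an arbitrary example:

example_flags = ['-Wall', '-I', '.', '-isystem', '/usr/include', '-I./vendor']
print(MakeRelativePathsInFlagsAbsolute(example_flags, '/home/user/project'))
# -> ['-Wall', '-I', '/home/user/project/.', '-isystem', '/usr/include',
#     '-I/home/user/project/./vendor']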
| 30.814159 | 102 | 0.690408 |
249d09c91411708583bb718472c7917117b49761 | 14,634 | py | Python | lib/galaxy/tool_util/deps/mulled/mulled_search.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | ["CC-BY-3.0"] | 1 | 2021-05-18T02:20:43.000Z | 2021-05-18T02:20:43.000Z | lib/galaxy/tool_util/deps/mulled/mulled_search.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | ["CC-BY-3.0"] | null | null | null | lib/galaxy/tool_util/deps/mulled/mulled_search.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | ["CC-BY-3.0"] | null | null | null |
#!/usr/bin/env python
import argparse
import json
import logging
import sys
import tempfile
import requests
from .mulled_list import get_singularity_containers
from .util import build_target, v2_image_name
try:
from conda.cli.python_api import run_command
except ImportError:
run_command = None # type: ignore
try:
from whoosh.fields import Schema
from whoosh.fields import TEXT
from whoosh.fields import STORED
from whoosh.index import create_in
from whoosh.qparser import QueryParser
except ImportError:
Schema = TEXT = STORED = create_in = QueryParser = None # type: ignore
QUAY_API_URL = 'https://quay.io/api/v1/repository'
class QuaySearch():
"""
Tool to search within a quay organization for a given software name.
"""
def __init__(self, organization):
self.index = None
self.organization = organization
def build_index(self):
"""
Create an index to quickly examine the repositories of a given quay.io organization.
"""
# download all information about the repositories from the
# given organization in self.organization
parameters = {'public': 'true', 'namespace': self.organization}
r = requests.get(QUAY_API_URL, headers={
'Accept-encoding': 'gzip'}, params=parameters, timeout=12)
tmp_dir = tempfile.mkdtemp()
schema = Schema(title=TEXT(stored=True), content=STORED)
self.index = create_in(tmp_dir, schema)
json_decoder = json.JSONDecoder()
decoded_request = json_decoder.decode(r.text)
writer = self.index.writer()
for repository in decoded_request['repositories']:
writer.add_document(
title=repository['name'], content=repository['description'])
writer.commit()
def search_repository(self, search_string, non_strict):
"""
Search Docker containers on quay.io.
Results are displayed with all available versions,
including the complete image name.
"""
# with statement closes searcher after usage.
with self.index.searcher() as searcher:
query = QueryParser(
"title", self.index.schema).parse(search_string)
results = searcher.search(query)
if non_strict:
# look for spelling errors and use suggestions as a search term too
corrector = searcher.corrector("title")
suggestions = corrector.suggest(search_string, limit=2)
# get all repositories with suggested keywords
for suggestion in suggestions:
search_string = f"*{suggestion}*"
query = QueryParser(
"title", self.index.schema).parse(search_string)
results_tmp = searcher.search(query)
results.extend(results_tmp)
out = list()
for result in results:
title = result['title']
for version in self.get_additional_repository_information(title):
out.append({'package': title, 'version': version, })
return out
def get_additional_repository_information(self, repository_string):
"""
Function downloads additional information from quay.io to
get the tag-field which includes the version number.
"""
url = f"{QUAY_API_URL}/{self.organization}/{repository_string}"
r = requests.get(url, headers={'Accept-encoding': 'gzip'})
json_decoder = json.JSONDecoder()
decoded_request = json_decoder.decode(r.text)
return decoded_request['tags']
class CondaSearch():
"""
Tool to search the bioconda channel
"""
def __init__(self, channel):
self.channel = channel
def get_json(self, search_string):
"""
Function takes search_string variable and returns results from the bioconda channel in JSON format
"""
if run_command is None:
raise Exception(f"Invalid search destination. {deps_error_message('conda')}")
raw_out, err, exit_code = run_command(
'search', '-c',
self.channel,
search_string,
use_exception_handler=True)
if exit_code != 0:
logging.info(f'Search failed with: {err}')
return []
return [{'package': n.split()[0], 'version': n.split()[1], 'build': n.split()[2]} for n in raw_out.split('\n')[2:-1]]
class GitHubSearch():
"""
Tool to search the GitHub bioconda-recipes repo
"""
def get_json(self, search_string):
"""
        Takes search_string variable and returns results from the bioconda-recipes github repository in JSON format
"""
response = requests.get(
f"https://api.github.com/search/code?q={search_string}+in:path+repo:bioconda/bioconda-recipes+path:recipes").json()
return response
def process_json(self, json, search_string):
"""
Take JSON input and process it, returning the required data
"""
json = json['items'][0:10] # get top ten results
results = []
for result in json:
results.append({'name': result['name'], 'path': result['path']})
return results
def recipe_present(self, search_string):
"""
Check if a recipe exists in bioconda-recipes which matches search_string exactly
"""
if requests.get(f"https://api.github.com/repos/bioconda/bioconda-recipes/contents/recipes/{search_string}").status_code == 200:
return True
else:
return False
def get_package_hash(packages, versions):
"""
    Takes packages and versions (if the latter are given) and returns a hash for each. Also checks GitHub to see if the container is already present.
"""
hash_results = {}
targets = []
if versions:
for p in packages:
targets.append(build_target(p, version=versions[p]))
else: # if versions are not given only calculate the package hash
for p in packages:
targets.append(build_target(p))
# make the hash from the processed targets
package_hash = v2_image_name(targets)
hash_results['package_hash'] = package_hash.split(':')[0]
if versions:
hash_results['version_hash'] = package_hash.split(':')[1]
r = requests.get(f"https://quay.io/api/v1/repository/biocontainers/{hash_results['package_hash']}")
if r.status_code == 200:
hash_results['container_present'] = True
if versions: # now test if the version hash is listed in the repository tags
# remove -0, -1, etc from end of the tag
tags = [n[:-2] for n in r.json()['tags']]
if hash_results['version_hash'] in tags:
hash_results['container_present_with_version'] = True
else:
hash_results['container_present_with_version'] = False
else:
hash_results['container_present'] = False
return hash_results
def singularity_search(search_string):
"""
Check if a singularity package is present and return the link.
"""
results = []
containers = get_singularity_containers()
for container in containers:
if search_string in container:
name = container.split(':')[0]
version = container.split(':')[1]
results.append({'package': name, 'version': version})
return results
def readable_output(json, organization='biocontainers', channel='bioconda'):
# if json is empty:
if sum([len(json[destination][results]) for destination in json for results in json[destination]]) == 0:
sys.stdout.write('No results found for that query.\n')
return
# return results for quay, conda and singularity together
if sum([len(json[destination][results]) for destination in ['quay', 'conda', 'singularity', ] for results in json.get(destination, [])]) > 0:
sys.stdout.write("The query returned the following result(s).\n")
# put quay, conda etc results as lists in lines
lines = [['LOCATION', 'NAME', 'VERSION', 'COMMAND\n']]
for results in json.get('quay', {}).values():
for result in results:
lines.append(['quay', result['package'], result['version'], 'docker pull quay.io/%s/%s:%s\n' %
(organization, result['package'], result['version'])]) # NOT a real solution
for results in json.get('conda', {}).values():
for result in results:
lines.append(['conda', result['package'], f"{result['version']}--{result['build']}",
f"conda install -c {channel} {result['package']}={result['version']}={result['build']}\n"])
for results in json.get('singularity', {}).values():
for result in results:
lines.append(['singularity', result['package'], result['version'],
f"wget https://depot.galaxyproject.org/singularity/{result['package']}:{result['version']}\n"])
col_width0, col_width1, col_width2 = (max(len(
line[n]) for line in lines) + 2 for n in (0, 1, 2)) # def max col widths for the output
# create table
for line in lines:
sys.stdout.write("".join((line[0].ljust(col_width0), line[1].ljust(
col_width1), line[2].ljust(col_width2), line[3]))) # output
if json.get('github_recipe_present', False):
sys.stdout.write('\n' if 'lines' in locals() else '')
sys.stdout.write(
'The following recipes were found in the bioconda-recipes repository which exactly matched one of the search terms:\n')
lines = [['QUERY', 'LOCATION\n']]
for recipe in json['github_recipe_present']['recipes']:
lines.append(
[recipe, f"https://api.github.com/repos/bioconda/bioconda-recipes/contents/recipes/{recipe}\n"])
col_width0 = max(len(line[0]) for line in lines) + 2
for line in lines:
sys.stdout.write(
"".join((line[0].ljust(col_width0), line[1]))) # output
if sum([len(json['github'][results]) for results in json.get('github', [])]) > 0:
sys.stdout.write('\n' if 'lines' in locals() else '')
sys.stdout.write(
"Other result(s) on the bioconda-recipes GitHub repository:\n")
lines = [['QUERY', 'FILE', 'URL\n']]
for search_string, results in json.get('github', {}).items():
for result in results:
lines.append([search_string, result['name'],
f"https://github.com/bioconda/bioconda-recipes/tree/master/{result['path']}\n"])
# def max col widths for the output
col_width0, col_width1 = (
max(len(line[n]) for line in lines) + 2 for n in (0, 1))
for line in lines:
sys.stdout.write("".join(
(line[0].ljust(col_width0), line[1].ljust(col_width1), line[2]))) # output
def deps_error_message(package):
return f"Required dependency [{package}] is not installed. Run 'pip install galaxy-tool-util[mulled]'."
def main(argv=None):
if Schema is None:
sys.stdout.write(deps_error_message("Whoosh"))
return
destination_defaults = ['quay', 'singularity', 'github']
if run_command is not None:
destination_defaults.append('conda')
parser = argparse.ArgumentParser(
description='Searches in a given quay organization for a repository')
parser.add_argument('-d', '--destination', dest='search_dest', nargs='+', default=destination_defaults,
                        help="Choose where to search. Options are 'conda', 'quay', 'singularity' and 'github'. If no options are given, all will be searched.")
parser.add_argument('-o', '--organization', dest='organization_string', default="biocontainers",
help='Change quay organization to search; default is biocontainers.')
parser.add_argument('-c', '--channel', dest='channel_string', default="bioconda",
help='Change conda channel to search; default is bioconda.')
parser.add_argument('--non-strict', dest='non_strict', action="store_true",
help='Autocorrection of typos activated. Lists more results but can be confusing.\
For too many queries quay.io blocks the request and the results can be incomplete.')
parser.add_argument('-j', '--json', dest='json',
action="store_true", help='Returns results as JSON.')
parser.add_argument('-s', '--search', required=True, nargs='+',
help='The name of the tool(s) to search for.')
args = parser.parse_args()
json_results = {dest: {} for dest in args.search_dest}
versions = {}
if len(args.search) > 1: # get hash if multiple packages are searched
args.search.append(get_package_hash(
args.search, versions)['package_hash'])
if 'conda' in args.search_dest:
conda_results = {}
conda = CondaSearch(args.channel_string)
for item in args.search:
conda_results[item] = conda.get_json(item)
json_results['conda'] = conda_results
if 'github' in args.search_dest:
github_results = {}
github_recipe_present = []
github = GitHubSearch()
for item in args.search:
github_json = github.get_json(item)
github_results[item] = github.process_json(github_json, item)
if github.recipe_present(item):
github_recipe_present.append(item)
json_results['github'] = github_results
json_results['github_recipe_present'] = {
'recipes': github_recipe_present}
if 'quay' in args.search_dest:
quay_results = {}
quay = QuaySearch(args.organization_string)
quay.build_index()
for item in args.search:
quay_results[item] = quay.search_repository(item, args.non_strict)
json_results['quay'] = quay_results
if 'singularity' in args.search_dest:
singularity_results = {}
for item in args.search:
singularity_results[item] = singularity_search(item)
json_results['singularity'] = singularity_results
if args.json:
print(json_results)
else:
readable_output(json_results, args.organization_string,
args.channel_string)
if __name__ == "__main__":
main()
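A short usage sketch for the QuaySearch helper defined above (not part of the Galaxy source); it assumes the optional whoosh dependency is installed and that quay.io is reachable:

qs = QuaySearch("biocontainers")
qs.build_index()  # downloads the organization's repository list and indexes it with whoosh
for hit in qs.search_repository("samtools", non_strict=False):
    print(hit["package"], hit["version"])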
| 39.444744 | 158 | 0.617876 |
40f8408d07c7c370a6866a6361e2b728cd6edce0 | 824 | py | Python | sample/sample_application/urls.py | 2000charge/ap-python-sdk | ae96737b8021f5d2816ce73dd370544675e6b55d | ["MIT"] | null | null | null | sample/sample_application/urls.py | 2000charge/ap-python-sdk | ae96737b8021f5d2816ce73dd370544675e6b55d | ["MIT"] | null | null | null | sample/sample_application/urls.py | 2000charge/ap-python-sdk | ae96737b8021f5d2816ce73dd370544675e6b55d | ["MIT"] | null | null | null |
"""sample_application URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^app/', include('app.urls')),
url(r'^admin/', admin.site.urls),
]
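The include('app.urls') entry above expects an app-level URLconf; a hypothetical sketch of that app/urls.py, following the Django 1.10-style url() pattern described in the docstring (the app.views module is assumed):

# Hypothetical app/urls.py referenced by include('app.urls') above.
from django.conf.urls import url

from . import views  # assumes the app defines a views module

urlpatterns = [
    url(r'^$', views.home, name='home'),
]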
| 35.826087 | 79 | 0.700243 |
dce3f4c749099b7b8fe4881d65321643fd402174 | 8,515 | py | Python | course/views.py | Nuclear-Smile/ClassSelectionManageSystem | c457716307a35f5d9742627114129688ae25513a | ["MIT"] | 11 | 2021-01-13T05:12:24.000Z | 2022-03-17T16:29:30.000Z | course/views.py | Nuclear-Smile/ClassSelectionManageSystem | c457716307a35f5d9742627114129688ae25513a | ["MIT"] | 1 | 2021-04-21T04:16:11.000Z | 2021-04-21T04:17:14.000Z | course/views.py | Nuclear-Smile/ClassSelectionManageSystem | c457716307a35f5d9742627114129688ae25513a | ["MIT"] | 4 | 2021-04-26T02:35:49.000Z | 2021-12-12T09:28:23.000Z |
from django.http.response import HttpResponse
from django.shortcuts import render, reverse, redirect
from django.db.models import Q
from constants import INVALID_KIND, INVALID_REQUEST_METHOD, ILLEGAL_KIND
from course.forms import CourseForm, ScheduleForm
from course.models import Course, StudentCourse, Schedule
from user.util import get_user
from django.utils import timezone
def to_home(request):
kind = request.session.get('kind', '')
return redirect(reverse("course", kwargs={"kind": kind}))
def home(request, kind):
if kind == "teacher":
return teacher_home(request)
elif kind == "student":
return student_home(request)
return HttpResponse(INVALID_KIND)
def teacher_home(request):
user = get_user(request, "teacher")
if not user:
return redirect(reverse("login", kwargs={"kind": "teacher"}))
info = {
"name": user.name,
"kind": "teacher",
}
is_search = False
search_key = ""
if request.method == "POST":
search_key = request.POST.get("search")
if search_key:
is_search = True
context = {"info": info}
q = Q(teacher=user)
if is_search:
q = q & Q(name__icontains=search_key)
context["search_key"] = search_key
context["course_list"] = Course.objects.filter(q).order_by('status')
return render(request, 'course/teacher/home.html', context)
def student_home(request):
return redirect(reverse("view_course", kwargs={"view_kind": "current"}))
def create_course(request):
user = get_user(request, "teacher")
if not user:
return redirect(reverse("login", kwargs={"kind": "teacher"}))
info = {
"name": user.name,
"kind": "teacher",
}
if request.method == 'POST':
form = CourseForm(request.POST)
if form.is_valid():
obj = form.save(commit=False)
obj.status = 1
obj.teacher = user
obj.save()
return redirect(reverse("course", kwargs={"kind": "teacher"}))
elif request.method == 'GET':
form = CourseForm()
else:
return HttpResponse(INVALID_REQUEST_METHOD)
return render(request, 'course/teacher/create_course.html', {'info': info, 'form': form})
def create_schedule(request, course_id):
user = get_user(request, "teacher")
if not user:
return redirect(reverse("login", kwargs={"kind": "teacher"}))
info = {
"name": user.name,
"kind": "teacher",
}
course = Course.objects.get(pk=course_id)
if request.method == 'POST':
form = ScheduleForm(request.POST)
if form.is_valid():
obj = form.save(commit=False)
obj.course = course
obj.save()
return redirect(reverse("view_detail", kwargs={"course_id": course_id}))
elif request.method == 'GET':
form = ScheduleForm()
else:
return HttpResponse(INVALID_REQUEST_METHOD)
return render(request, 'course/teacher/create_schedule.html', {'info': info, 'form': form, "course": course})
def delete_schedule(request, schedule_id):
user = get_user(request, "teacher")
if not user:
return redirect(reverse("login", kwargs={"kind": "teacher"}))
schedule = Schedule.objects.get(pk=schedule_id)
course_id = request.GET.get("course_id") or schedule.course.id
schedule.delete()
return redirect(reverse("view_detail", kwargs={"course_id": course_id}))
def handle_course(request, course_id, handle_kind):
"""
:param request:
:param course_id:
:param handle_kind:
    1: "start course selection",
    2: "end course selection",
    3: "end the course",
    4: "grading complete"
:return:
"""
user = get_user(request, "teacher")
if not user:
return redirect(reverse("login", kwargs={"kind": "teacher"}))
info = {
"name": user.name,
"kind": "teacher",
}
course = Course.objects.get(pk=course_id)
if course.status == handle_kind and course.status < 5:
if course.status == 4:
scs = StudentCourse.objects.filter(course=course)
all_given = True
res = ""
for sc in scs:
if sc.scores is None:
all_given = False
res += "<div>%s 未打分</div>" % sc.student
if all_given:
course.status += 1
course.save()
return redirect(reverse("view_detail", kwargs={"course_id": course.id}))
else:
return HttpResponse(res)
else:
course.status += 1
course.save()
course_list = Course.objects.filter(teacher=user)
return render(request, 'course/teacher/home.html', {'info': info, 'course_list': course_list})
def view_detail(request, course_id):
user = get_user(request, "teacher")
if not user:
return redirect(reverse("login", kwargs={"kind": "teacher"}))
info = {
"name": user.name,
"kind": "teacher",
}
course = Course.objects.get(pk=course_id)
c_stu_list = StudentCourse.objects.filter(course=course)
sche_list = Schedule.objects.filter(course=course)
context = {
"info": info,
"course": course,
"course_students": c_stu_list,
"schedules": sche_list
}
if course.status == 5:
sorted_cs_list = sorted(c_stu_list, key=lambda cs: cs.scores)
context["sorted_course_students"] = sorted_cs_list
return render(request, "course/teacher/course.html", context)
def view_course(request, view_kind):
"""
:param view_kind:
    current: view current courses
    is_end: view finished courses
    select: select a course
    withdraw: withdraw from a course
"""
user = get_user(request, "student")
if not user:
return redirect(reverse("login", kwargs={"kind": "student"}))
is_search = False
search_key = ""
if request.method == "POST":
search_key = request.POST.get("search")
if search_key:
is_search = True
info = {
"name": user.name,
"kind": "student",
}
course_list = []
if view_kind in ["select", "current", "withdraw", "is_end"]:
if view_kind == "select":
q = Q(status=2)
if is_search:
q = q & (Q(name__icontains=search_key) | Q(teacher__name__icontains=search_key))
course_list = Course.objects.filter(q)
my_course = StudentCourse.objects.filter(Q(student=user) & Q(with_draw=False))
my_cids = [c.course.id for c in my_course]
course_list = [c for c in course_list if c.id not in my_cids]
else:
q = Q(student=user) & Q(with_draw=False)
if is_search:
q = q & (Q(course__name__icontains=search_key) | Q(course__teacher__name__icontains=search_key))
my_course = StudentCourse.objects.filter(q)
if view_kind == "current":
course_list = [c.course for c in my_course if c.course.status < 4]
elif view_kind == "withdraw":
course_list = [c.course for c in my_course if c.course.status == 2]
elif view_kind == "is_end":
course_list = [c for c in my_course if c.course.status >= 4]
else:
return HttpResponse(INVALID_REQUEST_METHOD)
context = {
'info': info,
'view_kind': view_kind,
'course_list': course_list
}
if is_search:
context["search_key"] = search_key
return render(request, 'course/student/home.html', context)
def operate_course(request, operate_kind, course_id):
"""
:param operate_kind:
    current: view current courses
    is_end: view finished courses
    select: select a course
    withdraw: withdraw from a course
"""
user = get_user(request, "student")
if not user:
return redirect(reverse("login", kwargs={"kind": "student"}))
if operate_kind not in ["select", "withdraw"]:
return HttpResponse(ILLEGAL_KIND)
elif operate_kind == "select":
course = Course.objects.filter(pk=course_id).get()
new_course = StudentCourse(student=user, course=course)
new_course.save()
elif operate_kind == "withdraw":
q = Q(course__id=course_id) & Q(student=user) & Q(with_draw=False)
course = StudentCourse.objects.filter(q).get()
course.with_draw = True
course.with_draw_time = timezone.now()
course.save()
return redirect(reverse("view_course", kwargs={"view_kind": operate_kind}))
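A hypothetical urls.py sketch (not in this excerpt) that wires the views above under the route names the code passes to reverse(); the URL paths and the Django 2+ path() style are assumptions:

# Hypothetical URL wiring consistent with the reverse() names used in the views above.
from django.urls import path

from course import views

urlpatterns = [
    path('course/<str:kind>/', views.home, name='course'),
    path('course/view/<str:view_kind>/', views.view_course, name='view_course'),
    path('course/detail/<int:course_id>/', views.view_detail, name='view_detail'),
    path('course/operate/<str:operate_kind>/<int:course_id>/', views.operate_course, name='operate_course'),
]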
| 29.463668 | 113 | 0.601174 |
4a9d291f33e80fdfdf0da6fa2932307fbabc46bc | 40,336 | py | Python | vyper/parser/expr.py | olwee/vyper | b532e6e3b8a6912baad20f8cae5e4429b6e6c76b | ["MIT"] | null | null | null | vyper/parser/expr.py | olwee/vyper | b532e6e3b8a6912baad20f8cae5e4429b6e6c76b | ["MIT"] | null | null | null | vyper/parser/expr.py | olwee/vyper | b532e6e3b8a6912baad20f8cae5e4429b6e6c76b | ["MIT"] | null | null | null |
import ast
from vyper.exceptions import (
InvalidLiteralException,
NonPayableViolationException,
StructureException,
TypeMismatchException,
VariableDeclarationException,
ParserException
)
from vyper.parser.lll_node import LLLnode
from vyper.parser import self_call
from vyper.parser.parser_utils import (
getpos,
unwrap_location,
get_original_if_0_prefixed,
get_number_as_fraction,
add_variable_offset,
)
from vyper.utils import (
MemoryPositions,
SizeLimits,
bytes_to_int,
string_to_bytes,
DECIMAL_DIVISOR,
checksum_encode,
is_varname_valid,
)
from vyper.types import (
BaseType,
ByteArrayType,
ContractType,
ListType,
MappingType,
NullType,
StructType,
TupleType,
)
from vyper.types import (
is_base_type,
)
from vyper.types import (
are_units_compatible,
is_numeric_type,
combine_units
)
class Expr(object):
# TODO: Once other refactors are made reevaluate all inline imports
def __init__(self, expr, context):
self.expr = expr
self.context = context
self.expr_table = {
LLLnode: self.get_expr,
ast.Num: self.number,
ast.Str: self.string,
ast.NameConstant: self.constants,
ast.Name: self.variables,
ast.Attribute: self.attribute,
ast.Subscript: self.subscript,
ast.BinOp: self.arithmetic,
ast.Compare: self.compare,
ast.BoolOp: self.boolean_operations,
ast.UnaryOp: self.unary_operations,
ast.Call: self.call,
ast.List: self.list_literals,
ast.Dict: self.struct_literals,
ast.Tuple: self.tuple_literals,
}
expr_type = self.expr.__class__
if expr_type in self.expr_table:
self.lll_node = self.expr_table[expr_type]()
else:
raise Exception("Unsupported operator: %r" % ast.dump(self.expr))
def get_expr(self):
return self.expr
def number(self):
orignum = get_original_if_0_prefixed(self.expr, self.context)
if orignum is None and isinstance(self.expr.n, int):
            # Literal (most likely) becomes int128
if SizeLimits.in_bounds('int128', self.expr.n) or self.expr.n < 0:
return LLLnode.from_list(self.expr.n, typ=BaseType('int128', unit=None, is_literal=True), pos=getpos(self.expr))
            # Literal is large enough that it (most likely) becomes uint256.
else:
return LLLnode.from_list(self.expr.n, typ=BaseType('uint256', unit=None, is_literal=True), pos=getpos(self.expr))
elif isinstance(self.expr.n, float):
numstring, num, den = get_number_as_fraction(self.expr, self.context)
# if not SizeLimits.in_bounds('decimal', num // den):
# if not SizeLimits.MINDECIMAL * den <= num <= SizeLimits.MAXDECIMAL * den:
if not (SizeLimits.MINNUM * den < num < SizeLimits.MAXNUM * den):
raise InvalidLiteralException("Number out of range: " + numstring, self.expr)
if DECIMAL_DIVISOR % den:
raise InvalidLiteralException("Too many decimal places: " + numstring, self.expr)
return LLLnode.from_list(num * DECIMAL_DIVISOR // den, typ=BaseType('decimal', unit=None), pos=getpos(self.expr))
# Binary literal.
elif orignum[:2] == '0b':
str_val = orignum[2:]
total_bits = len(orignum[2:])
total_bits = total_bits if total_bits % 8 == 0 else total_bits + 8 - (total_bits % 8) # ceil8 to get byte length.
if len(orignum[2:]) != total_bits: # Support only full formed bit definitions.
raise InvalidLiteralException("Bit notation requires a multiple of 8 bits / 1 byte. {} bit(s) are missing.".format(total_bits - len(orignum[2:])), self.expr)
byte_len = int(total_bits / 8)
placeholder = self.context.new_placeholder(ByteArrayType(byte_len))
seq = []
seq.append(['mstore', placeholder, byte_len])
for i in range(0, total_bits, 256):
section = str_val[i:i + 256]
int_val = int(section, 2) << (256 - len(section)) # bytes are right padded.
seq.append(
['mstore', ['add', placeholder, i + 32], int_val])
return LLLnode.from_list(['seq'] + seq + [placeholder],
typ=ByteArrayType(byte_len), location='memory', pos=getpos(self.expr), annotation='Create ByteArray (Binary literal): %s' % str_val)
elif len(orignum) == 42:
if checksum_encode(orignum) != orignum:
raise InvalidLiteralException("""Address checksum mismatch. If you are sure this is the
right address, the correct checksummed form is: %s""" % checksum_encode(orignum), self.expr)
return LLLnode.from_list(self.expr.n, typ=BaseType('address', is_literal=True), pos=getpos(self.expr))
elif len(orignum) == 66:
return LLLnode.from_list(self.expr.n, typ=BaseType('bytes32', is_literal=True), pos=getpos(self.expr))
else:
raise InvalidLiteralException("Cannot read 0x value with length %d. Expecting 42 (address incl 0x) or 66 (bytes32 incl 0x)"
% len(orignum), self.expr)
# Byte array literals
def string(self):
bytez, bytez_length = string_to_bytes(self.expr.s)
placeholder = self.context.new_placeholder(ByteArrayType(bytez_length))
seq = []
seq.append(['mstore', placeholder, bytez_length])
for i in range(0, len(bytez), 32):
seq.append(['mstore', ['add', placeholder, i + 32], bytes_to_int((bytez + b'\x00' * 31)[i: i + 32])])
return LLLnode.from_list(['seq'] + seq + [placeholder],
typ=ByteArrayType(bytez_length), location='memory', pos=getpos(self.expr), annotation='Create ByteArray: %s' % bytez)
# True, False, None constants
def constants(self):
if self.expr.value is True:
return LLLnode.from_list(1, typ=BaseType('bool', is_literal=True), pos=getpos(self.expr))
elif self.expr.value is False:
return LLLnode.from_list(0, typ=BaseType('bool', is_literal=True), pos=getpos(self.expr))
elif self.expr.value is None:
return LLLnode.from_list(None, typ=NullType(), pos=getpos(self.expr))
else:
raise Exception("Unknown name constant: %r" % self.expr.value.value)
# Variable names
def variables(self):
builtin_constants = {
'ZERO_ADDRESS': LLLnode.from_list([0], typ=BaseType('address', None, is_literal=True), pos=getpos(self.expr)),
'MAX_INT128': LLLnode.from_list(['mload', MemoryPositions.MAXNUM], typ=BaseType('int128', None, is_literal=True), pos=getpos(self.expr)),
'MIN_INT128': LLLnode.from_list(['mload', MemoryPositions.MINNUM], typ=BaseType('int128', None, is_literal=True), pos=getpos(self.expr)),
'MAX_DECIMAL': LLLnode.from_list(['mload', MemoryPositions.MAXDECIMAL], typ=BaseType('decimal', None, is_literal=True), pos=getpos(self.expr)),
'MIN_DECIMAL': LLLnode.from_list(['mload', MemoryPositions.MINDECIMAL], typ=BaseType('decimal', None, is_literal=True), pos=getpos(self.expr)),
'MAX_UINT256': LLLnode.from_list([2**256 - 1], typ=BaseType('uint256', None, is_literal=True), pos=getpos(self.expr)),
}
if self.expr.id == 'self':
return LLLnode.from_list(['address'], typ='address', pos=getpos(self.expr))
elif self.expr.id in self.context.vars:
var = self.context.vars[self.expr.id]
return LLLnode.from_list(var.pos, typ=var.typ, location='memory', pos=getpos(self.expr), annotation=self.expr.id, mutable=var.mutable)
elif self.expr.id in builtin_constants:
return builtin_constants[self.expr.id]
elif self.expr.id in self.context.constants:
            # check if value is compatible with its declared type
const = self.context.constants[self.expr.id]
if isinstance(const, ast.AnnAssign): # Handle ByteArrays.
expr = Expr(const.value, self.context).lll_node
return expr
            # Other types are already unwrapped, no need to do anything further
return self.context.constants[self.expr.id]
else:
raise VariableDeclarationException("Undeclared variable: " + self.expr.id, self.expr)
# x.y or x[5]
def attribute(self):
# x.balance: balance of address x
if self.expr.attr == 'balance':
addr = Expr.parse_value_expr(self.expr.value, self.context)
if not is_base_type(addr.typ, 'address'):
raise TypeMismatchException("Type mismatch: balance keyword expects an address as input", self.expr)
return LLLnode.from_list(['balance', addr], typ=BaseType('uint256', {'wei': 1}), location=None, pos=getpos(self.expr))
# x.codesize: codesize of address x
elif self.expr.attr == 'codesize' or self.expr.attr == 'is_contract':
addr = Expr.parse_value_expr(self.expr.value, self.context)
if not is_base_type(addr.typ, 'address'):
raise TypeMismatchException("Type mismatch: codesize keyword expects an address as input", self.expr)
if self.expr.attr == 'codesize':
eval_code = ['extcodesize', addr]
output_type = 'int128'
else:
eval_code = ['gt', ['extcodesize', addr], 0]
output_type = 'bool'
return LLLnode.from_list(eval_code, typ=BaseType(output_type), location=None, pos=getpos(self.expr))
# self.x: global attribute
elif isinstance(self.expr.value, ast.Name) and self.expr.value.id == "self":
if self.expr.attr not in self.context.globals:
raise VariableDeclarationException("Persistent variable undeclared: " + self.expr.attr, self.expr)
var = self.context.globals[self.expr.attr]
return LLLnode.from_list(var.pos, typ=var.typ, location='storage', pos=getpos(self.expr), annotation='self.' + self.expr.attr)
# Reserved keywords
elif isinstance(self.expr.value, ast.Name) and self.expr.value.id in ("msg", "block", "tx"):
key = self.expr.value.id + "." + self.expr.attr
if key == "msg.sender":
if self.context.is_private:
raise ParserException("msg.sender not allowed in private functions.", self.expr)
return LLLnode.from_list(['caller'], typ='address', pos=getpos(self.expr))
elif key == "msg.value":
if not self.context.is_payable:
raise NonPayableViolationException("Cannot use msg.value in a non-payable function", self.expr)
return LLLnode.from_list(['callvalue'], typ=BaseType('uint256', {'wei': 1}), pos=getpos(self.expr))
elif key == "msg.gas":
return LLLnode.from_list(['gas'], typ='uint256', pos=getpos(self.expr))
elif key == "block.difficulty":
return LLLnode.from_list(['difficulty'], typ='uint256', pos=getpos(self.expr))
elif key == "block.timestamp":
return LLLnode.from_list(['timestamp'], typ=BaseType('uint256', {'sec': 1}, True), pos=getpos(self.expr))
elif key == "block.coinbase":
return LLLnode.from_list(['coinbase'], typ='address', pos=getpos(self.expr))
elif key == "block.number":
return LLLnode.from_list(['number'], typ='uint256', pos=getpos(self.expr))
elif key == "block.prevhash":
return LLLnode.from_list(['blockhash', ['sub', 'number', 1]], typ='bytes32', pos=getpos(self.expr))
elif key == "tx.origin":
return LLLnode.from_list(['origin'], typ='address', pos=getpos(self.expr))
else:
raise Exception("Unsupported keyword: " + key)
# Other variables
else:
sub = Expr.parse_variable_location(self.expr.value, self.context)
# contract type
if isinstance(sub.typ, ContractType):
return sub
if not isinstance(sub.typ, StructType):
raise TypeMismatchException("Type mismatch: member variable access not expected", self.expr.value)
attrs = sorted(sub.typ.members.keys())
if self.expr.attr not in attrs:
raise TypeMismatchException("Member %s not found. Only the following available: %s" % (self.expr.attr, " ".join(attrs)), self.expr)
return add_variable_offset(sub, self.expr.attr, pos=getpos(self.expr))
def subscript(self):
sub = Expr.parse_variable_location(self.expr.value, self.context)
if isinstance(sub.typ, (MappingType, ListType)):
if 'value' not in vars(self.expr.slice):
raise StructureException("Array access must access a single element, not a slice", self.expr)
index = Expr.parse_value_expr(self.expr.slice.value, self.context)
elif isinstance(sub.typ, TupleType):
if not isinstance(self.expr.slice.value, ast.Num) or self.expr.slice.value.n < 0 or self.expr.slice.value.n >= len(sub.typ.members):
raise TypeMismatchException("Tuple index invalid", self.expr.slice.value)
index = self.expr.slice.value.n
else:
raise TypeMismatchException("Bad subscript attempt", self.expr.value)
o = add_variable_offset(sub, index, pos=getpos(self.expr))
o.mutable = sub.mutable
return o
def arithmetic_get_reference(self, item):
item_lll = Expr.parse_value_expr(item, self.context)
if isinstance(item, ast.Call):
# We only want to perform call statements once.
placeholder = self.context.new_placeholder(item_lll.typ)
pre_alloc = ['mstore', placeholder, item_lll]
return pre_alloc, LLLnode.from_list(['mload', placeholder], location='memory', typ=item_lll.typ)
else:
return None, item_lll
def arithmetic(self):
pre_alloc_left, left = self.arithmetic_get_reference(self.expr.left)
pre_alloc_right, right = self.arithmetic_get_reference(self.expr.right)
if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
raise TypeMismatchException("Unsupported types for arithmetic op: %r %r" % (left.typ, right.typ), self.expr)
arithmetic_pair = {left.typ.typ, right.typ.typ}
# Special Case: Simplify any literal to literal arithmetic at compile time.
if left.typ.is_literal and right.typ.is_literal and \
isinstance(right.value, int) and isinstance(left.value, int):
if isinstance(self.expr.op, ast.Add):
val = left.value + right.value
elif isinstance(self.expr.op, ast.Sub):
val = left.value - right.value
elif isinstance(self.expr.op, ast.Mult):
val = left.value * right.value
elif isinstance(self.expr.op, ast.Div):
val = left.value // right.value
elif isinstance(self.expr.op, ast.Mod):
val = left.value % right.value
elif isinstance(self.expr.op, ast.Pow):
val = left.value ** right.value
else:
raise ParserException('Unsupported literal operator: %s' % str(type(self.expr.op)), self.expr)
num = ast.Num(val)
num.source_code = self.expr.source_code
num.lineno = self.expr.lineno
num.col_offset = self.expr.col_offset
return Expr.parse_value_expr(num, self.context)
        # Special case with uint256 where an int literal may be cast.
if arithmetic_pair == {'uint256', 'int128'}:
# Check right side literal.
if right.typ.is_literal and SizeLimits.in_bounds('uint256', right.value):
right = LLLnode.from_list(right.value, typ=BaseType('uint256', None, is_literal=True), pos=getpos(self.expr))
arithmetic_pair = {left.typ.typ, right.typ.typ}
# Check left side literal.
elif left.typ.is_literal and SizeLimits.in_bounds('uint256', left.value):
left = LLLnode.from_list(left.value, typ=BaseType('uint256', None, is_literal=True), pos=getpos(self.expr))
arithmetic_pair = {left.typ.typ, right.typ.typ}
# Only allow explicit conversions to occur.
if left.typ.typ != right.typ.typ:
raise TypeMismatchException("Cannot implicitly convert {} to {}.".format(left.typ.typ, right.typ.typ), self.expr)
ltyp, rtyp = left.typ.typ, right.typ.typ
if isinstance(self.expr.op, (ast.Add, ast.Sub)):
if left.typ.unit != right.typ.unit and left.typ.unit is not None and right.typ.unit is not None:
raise TypeMismatchException("Unit mismatch: %r %r" % (left.typ.unit, right.typ.unit), self.expr)
if left.typ.positional and right.typ.positional and isinstance(self.expr.op, ast.Add):
raise TypeMismatchException("Cannot add two positional units!", self.expr)
new_unit = left.typ.unit or right.typ.unit
new_positional = left.typ.positional ^ right.typ.positional # xor, as subtracting two positionals gives a delta
op = 'add' if isinstance(self.expr.op, ast.Add) else 'sub'
if ltyp == 'uint256' and isinstance(self.expr.op, ast.Add):
o = LLLnode.from_list(['seq',
# Checks that: a + b >= a
['assert', ['ge', ['add', left, right], left]],
['add', left, right]], typ=BaseType('uint256', new_unit, new_positional), pos=getpos(self.expr))
elif ltyp == 'uint256' and isinstance(self.expr.op, ast.Sub):
o = LLLnode.from_list(['seq',
# Checks that: a >= b
['assert', ['ge', left, right]],
['sub', left, right]], typ=BaseType('uint256', new_unit, new_positional), pos=getpos(self.expr))
elif ltyp == rtyp:
o = LLLnode.from_list([op, left, right], typ=BaseType(ltyp, new_unit, new_positional), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation '%r(%r, %r)'" % (op, ltyp, rtyp))
elif isinstance(self.expr.op, ast.Mult):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot multiply positional values!", self.expr)
new_unit = combine_units(left.typ.unit, right.typ.unit)
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['if', ['eq', left, 0], [0],
['seq', ['assert', ['eq', ['div', ['mul', left, right], left], right]],
['mul', left, right]]], typ=BaseType('uint256', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'int128':
o = LLLnode.from_list(['mul', left, right], typ=BaseType('int128', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'decimal':
o = LLLnode.from_list(['with', 'r', right, ['with', 'l', left,
['with', 'ans', ['mul', 'l', 'r'],
['seq',
['assert', ['or', ['eq', ['sdiv', 'ans', 'l'], 'r'], ['iszero', 'l']]],
['sdiv', 'ans', DECIMAL_DIVISOR]]]]], typ=BaseType('decimal', new_unit), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation 'mul(%r, %r)'" % (ltyp, rtyp))
elif isinstance(self.expr.op, ast.Div):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot divide positional values!", self.expr)
new_unit = combine_units(left.typ.unit, right.typ.unit, div=True)
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['seq',
# Checks that: b != 0
['assert', right],
['div', left, right]], typ=BaseType('uint256', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'int128':
o = LLLnode.from_list(['sdiv', left, ['clamp_nonzero', right]], typ=BaseType('int128', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'decimal':
o = LLLnode.from_list(['with', 'l', left, ['with', 'r', ['clamp_nonzero', right],
['sdiv', ['mul', 'l', DECIMAL_DIVISOR], 'r']]],
typ=BaseType('decimal', new_unit), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation 'div(%r, %r)'" % (ltyp, rtyp))
elif isinstance(self.expr.op, ast.Mod):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot use positional values as modulus arguments!", self.expr)
if left.typ.unit != right.typ.unit and left.typ.unit is not None and right.typ.unit is not None:
raise TypeMismatchException("Modulus arguments must have same unit", self.expr)
new_unit = left.typ.unit or right.typ.unit
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['seq',
['assert', right],
['mod', left, right]], typ=BaseType('uint256', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp:
o = LLLnode.from_list(['smod', left, ['clamp_nonzero', right]], typ=BaseType(ltyp, new_unit), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation 'mod(%r, %r)'" % (ltyp, rtyp))
elif isinstance(self.expr.op, ast.Pow):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot use positional values as exponential arguments!", self.expr)
if right.typ.unit:
raise TypeMismatchException("Cannot use unit values as exponents", self.expr)
if ltyp != 'int128' and ltyp != 'uint256' and isinstance(self.expr.right, ast.Name):
raise TypeMismatchException("Cannot use dynamic values as exponents, for unit base types", self.expr)
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['seq',
['assert', ['or', ['or', ['eq', right, 1], ['iszero', right]],
['lt', left, ['exp', left, right]]]],
['exp', left, right]], typ=BaseType('uint256'), pos=getpos(self.expr))
elif ltyp == rtyp == 'int128':
new_unit = left.typ.unit
if left.typ.unit and not isinstance(self.expr.right, ast.Name):
new_unit = {left.typ.unit.copy().popitem()[0]: self.expr.right.n}
o = LLLnode.from_list(['exp', left, right], typ=BaseType('int128', new_unit), pos=getpos(self.expr))
else:
raise TypeMismatchException('Only whole number exponents are supported', self.expr)
else:
raise Exception("Unsupported binop: %r" % self.expr.op)
p = ['seq']
if pre_alloc_left:
p.append(pre_alloc_left)
if pre_alloc_right:
p.append(pre_alloc_right)
if o.typ.typ == 'int128':
p.append(['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]])
return LLLnode.from_list(p, typ=o.typ, pos=getpos(self.expr))
elif o.typ.typ == 'decimal':
p.append(['clamp', ['mload', MemoryPositions.MINDECIMAL], o, ['mload', MemoryPositions.MAXDECIMAL]])
return LLLnode.from_list(p, typ=o.typ, pos=getpos(self.expr))
if o.typ.typ == 'uint256':
p.append(o)
return LLLnode.from_list(p, typ=o.typ, pos=getpos(self.expr))
else:
raise Exception("%r %r" % (o, o.typ))
def build_in_comparator(self):
from vyper.parser.parser import make_setter
left = Expr(self.expr.left, self.context).lll_node
right = Expr(self.expr.comparators[0], self.context).lll_node
if left.typ.typ != right.typ.subtype.typ:
raise TypeMismatchException("%s cannot be in a list of %s" % (left.typ.typ, right.typ.subtype.typ))
result_placeholder = self.context.new_placeholder(BaseType('bool'))
setter = []
# Load nth item from list in memory.
if right.value == 'multi':
# Copy literal to memory to be compared.
tmp_list = LLLnode.from_list(
obj=self.context.new_placeholder(ListType(right.typ.subtype, right.typ.count)),
typ=ListType(right.typ.subtype, right.typ.count),
location='memory'
)
setter = make_setter(tmp_list, right, 'memory', pos=getpos(self.expr))
load_i_from_list = ['mload', ['add', tmp_list, ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]]]
elif right.location == "storage":
load_i_from_list = ['sload', ['add', ['sha3_32', right], ['mload', MemoryPositions.FREE_LOOP_INDEX]]]
else:
load_i_from_list = ['mload', ['add', right, ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]]]
# Condition repeat loop has to break on.
break_loop_condition = [
'if',
['eq', unwrap_location(left), load_i_from_list],
['seq',
['mstore', '_result', 1], # store true.
'break']
]
# Repeat loop to loop-compare each item in the list.
for_loop_sequence = [
['mstore', result_placeholder, 0],
['with', '_result', result_placeholder,
['repeat', MemoryPositions.FREE_LOOP_INDEX, 0, right.typ.count, break_loop_condition]],
['mload', result_placeholder]
]
# Save list to memory, so one can iterate over it,
# used when literal was created with tmp_list.
if setter:
compare_sequence = ['seq', setter] + for_loop_sequence
else:
compare_sequence = ['seq'] + for_loop_sequence
# Compare the result of the repeat loop to 1, to know if a match was found.
o = LLLnode.from_list([
'eq', 1,
compare_sequence],
typ='bool',
            annotation="in comparator"
)
return o
@staticmethod
def _signed_to_unsigned_comparision_op(op):
translation_map = {
'sgt': 'gt',
'sge': 'ge',
'sle': 'le',
'slt': 'lt',
}
if op in translation_map:
return translation_map[op]
else:
return op
def compare(self):
left = Expr.parse_value_expr(self.expr.left, self.context)
right = Expr.parse_value_expr(self.expr.comparators[0], self.context)
if isinstance(left.typ, ByteArrayType) and isinstance(right.typ, ByteArrayType):
if left.typ.maxlen != right.typ.maxlen:
raise TypeMismatchException('Can only compare bytes of the same length', self.expr)
if left.typ.maxlen > 32 or right.typ.maxlen > 32:
raise ParserException('Can only compare bytes of length shorter than 32 bytes', self.expr)
elif isinstance(self.expr.ops[0], ast.In) and \
isinstance(right.typ, ListType):
if not are_units_compatible(left.typ, right.typ.subtype) and not are_units_compatible(right.typ.subtype, left.typ):
raise TypeMismatchException("Can't use IN comparison with different types!", self.expr)
return self.build_in_comparator()
else:
if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ):
raise TypeMismatchException("Can't compare values with different units!", self.expr)
if len(self.expr.ops) != 1:
raise StructureException("Cannot have a comparison with more than two elements", self.expr)
if isinstance(self.expr.ops[0], ast.Gt):
op = 'sgt'
elif isinstance(self.expr.ops[0], ast.GtE):
op = 'sge'
elif isinstance(self.expr.ops[0], ast.LtE):
op = 'sle'
elif isinstance(self.expr.ops[0], ast.Lt):
op = 'slt'
elif isinstance(self.expr.ops[0], ast.Eq):
op = 'eq'
elif isinstance(self.expr.ops[0], ast.NotEq):
op = 'ne'
else:
raise Exception("Unsupported comparison operator")
        # Compare byte arrays (limited to 32 bytes).
        if isinstance(left.typ, ByteArrayType) and isinstance(right.typ, ByteArrayType):
left = Expr(self.expr.left, self.context).lll_node
right = Expr(self.expr.comparators[0], self.context).lll_node
def load_bytearray(side):
if side.location == 'memory':
return ['mload', ['add', 32, side]]
elif side.location == 'storage':
return ['sload', ['add', 1, ['sha3_32', side]]]
return LLLnode.from_list(
[op, load_bytearray(left), load_bytearray(right)], typ='bool', pos=getpos(self.expr))
# Compare other types.
if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
if op not in ('eq', 'ne'):
raise TypeMismatchException("Invalid type for comparison op", self.expr)
left_type, right_type = left.typ.typ, right.typ.typ
# Special Case: comparison of a literal integer. If in valid range allow it to be compared.
if {left_type, right_type} == {'int128', 'uint256'} and {left.typ.is_literal, right.typ.is_literal} == {True, False}:
comparison_allowed = False
if left.typ.is_literal and SizeLimits.in_bounds(right_type, left.value):
comparison_allowed = True
elif right.typ.is_literal and SizeLimits.in_bounds(left_type, right.value):
comparison_allowed = True
op = self._signed_to_unsigned_comparision_op(op)
if comparison_allowed:
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(self.expr))
elif {left_type, right_type} == {'uint256', 'uint256'}:
op = self._signed_to_unsigned_comparision_op(op)
elif (left_type in ('decimal', 'int128') or right_type in ('decimal', 'int128')) and left_type != right_type:
raise TypeMismatchException(
'Implicit conversion from {} to {} disallowed, please convert.'.format(left_type, right_type),
self.expr
)
if left_type == right_type:
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(self.expr))
else:
raise TypeMismatchException("Unsupported types for comparison: %r %r" % (left_type, right_type), self.expr)
def boolean_operations(self):
# Iterate through values
for value in self.expr.values:
# Check for calls at assignment
if self.context.in_assignment and isinstance(value, ast.Call):
raise StructureException("Boolean operations with calls may not be performed on assignment", self.expr)
# Check for boolean operations with non-boolean inputs
_expr = Expr.parse_value_expr(value, self.context)
if not is_base_type(_expr.typ, 'bool'):
raise TypeMismatchException("Boolean operations can only be between booleans!", self.expr)
# TODO: Handle special case of literals and simplify at compile time
# Check for valid ops
if isinstance(self.expr.op, ast.And):
op = 'and'
elif isinstance(self.expr.op, ast.Or):
op = 'or'
else:
raise Exception("Unsupported bool op: " + self.expr.op)
# Handle different numbers of inputs
count = len(self.expr.values)
if count < 2:
raise StructureException("Expected at least two arguments for a bool op", self.expr)
elif count == 2:
left = Expr.parse_value_expr(self.expr.values[0], self.context)
right = Expr.parse_value_expr(self.expr.values[1], self.context)
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(self.expr))
else:
left = Expr.parse_value_expr(self.expr.values[0], self.context)
right = Expr.parse_value_expr(self.expr.values[1], self.context)
p = ['seq', [op, left, right]]
values = self.expr.values[2:]
while len(values) > 0:
value = Expr.parse_value_expr(values[0], self.context)
p = [op, value, p]
values = values[1:]
return LLLnode.from_list(p, typ='bool', pos=getpos(self.expr))
    # Unary operations ("not" and unary minus supported)
def unary_operations(self):
operand = Expr.parse_value_expr(self.expr.operand, self.context)
if isinstance(self.expr.op, ast.Not):
if isinstance(operand.typ, BaseType) and operand.typ.typ == 'bool':
return LLLnode.from_list(["iszero", operand], typ='bool', pos=getpos(self.expr))
else:
raise TypeMismatchException("Only bool is supported for not operation, %r supplied." % operand.typ, self.expr)
elif isinstance(self.expr.op, ast.USub):
if not is_numeric_type(operand.typ):
raise TypeMismatchException("Unsupported type for negation: %r" % operand.typ, operand)
if operand.typ.is_literal and 'int' in operand.typ.typ:
num = ast.Num(0 - operand.value)
num.source_code = self.expr.source_code
num.lineno = self.expr.lineno
num.col_offset = self.expr.col_offset
return Expr.parse_value_expr(num, self.context)
return LLLnode.from_list(["sub", 0, operand], typ=operand.typ, pos=getpos(self.expr))
else:
raise StructureException("Only the 'not' unary operator is supported")
def _get_external_contract_keywords(self):
value, gas = None, None
for kw in self.expr.keywords:
if kw.arg not in ('value', 'gas'):
raise TypeMismatchException('Invalid keyword argument, only "gas" and "value" supported.', self.expr)
elif kw.arg == 'gas':
gas = Expr.parse_value_expr(kw.value, self.context)
elif kw.arg == 'value':
value = Expr.parse_value_expr(kw.value, self.context)
return value, gas
# Function calls
def call(self):
from vyper.parser.parser import (
external_contract_call
)
from vyper.functions import (
dispatch_table,
)
if isinstance(self.expr.func, ast.Name):
function_name = self.expr.func.id
if function_name in dispatch_table:
return dispatch_table[function_name](self.expr, self.context)
else:
err_msg = "Not a top-level function: {}".format(function_name)
if function_name in [x.split('(')[0] for x, _ in self.context.sigs['self'].items()]:
err_msg += ". Did you mean self.{}?".format(function_name)
raise StructureException(err_msg, self.expr)
elif isinstance(self.expr.func, ast.Attribute) and isinstance(self.expr.func.value, ast.Name) and self.expr.func.value.id == "self":
return self_call.make_call(self.expr, self.context)
elif isinstance(self.expr.func, ast.Attribute) and isinstance(self.expr.func.value, ast.Call):
contract_name = self.expr.func.value.func.id
contract_address = Expr.parse_value_expr(self.expr.func.value.args[0], self.context)
value, gas = self._get_external_contract_keywords()
return external_contract_call(self.expr, self.context, contract_name, contract_address, pos=getpos(self.expr), value=value, gas=gas)
elif isinstance(self.expr.func.value, ast.Attribute) and self.expr.func.value.attr in self.context.sigs:
contract_name = self.expr.func.value.attr
var = self.context.globals[self.expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(var.pos, typ=var.typ, location='storage', pos=getpos(self.expr), annotation='self.' + self.expr.func.value.attr))
value, gas = self._get_external_contract_keywords()
return external_contract_call(self.expr, self.context, contract_name, contract_address, pos=getpos(self.expr), value=value, gas=gas)
elif isinstance(self.expr.func.value, ast.Attribute) and self.expr.func.value.attr in self.context.globals:
contract_name = self.context.globals[self.expr.func.value.attr].typ.unit
var = self.context.globals[self.expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(var.pos, typ=var.typ, location='storage', pos=getpos(self.expr), annotation='self.' + self.expr.func.value.attr))
value, gas = self._get_external_contract_keywords()
return external_contract_call(self.expr, self.context, contract_name, contract_address, pos=getpos(self.expr), value=value, gas=gas)
else:
raise StructureException("Unsupported operator: %r" % ast.dump(self.expr), self.expr)
def list_literals(self):
if not len(self.expr.elts):
raise StructureException("List must have elements", self.expr)
o = []
out_type = None
for elt in self.expr.elts:
o.append(Expr(elt, self.context).lll_node)
if not out_type:
out_type = o[-1].typ
previous_type = o[-1].typ.subtype.typ if hasattr(o[-1].typ, 'subtype') else o[-1].typ
current_type = out_type.subtype.typ if hasattr(out_type, 'subtype') else out_type
if len(o) > 1 and previous_type != current_type:
raise TypeMismatchException("Lists may only contain one type", self.expr)
return LLLnode.from_list(["multi"] + o, typ=ListType(out_type, len(o)), pos=getpos(self.expr))
def struct_literals(self):
o = {}
members = {}
for key, value in zip(self.expr.keys, self.expr.values):
if not isinstance(key, ast.Name) or not is_varname_valid(key.id, self.context.custom_units):
raise TypeMismatchException("Invalid member variable for struct: %r" % vars(key).get('id', key), key)
if key.id in o:
raise TypeMismatchException("Member variable duplicated: " + key.id, key)
o[key.id] = Expr(value, self.context).lll_node
members[key.id] = o[key.id].typ
return LLLnode.from_list(["multi"] + [o[key] for key in sorted(list(o.keys()))], typ=StructType(members), pos=getpos(self.expr))
def tuple_literals(self):
if not len(self.expr.elts):
raise StructureException("Tuple must have elements", self.expr)
o = []
for elt in self.expr.elts:
o.append(Expr(elt, self.context).lll_node)
return LLLnode.from_list(["multi"] + o, typ=TupleType(o), pos=getpos(self.expr))
# Parse an expression that results in a value
def parse_value_expr(expr, context):
return unwrap_location(Expr(expr, context).lll_node)
# Parse an expression that represents an address in memory or storage
def parse_variable_location(expr, context):
o = Expr(expr, context).lll_node
if not o.location:
raise Exception("Looking for a variable location, instead got a value")
return o
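# A minimal, self-contained sketch of the literal-bounds rule applied in compare()
# above: an int128 literal may be compared against a uint256 operand (and vice
# versa) only if the literal fits inside the other type's range. The bounds dict
# below merely restates SizeLimits for these two types and is illustrative, not
# part of the compiler.
_ILLUSTRATIVE_BOUNDS = {
    'int128': (-2**127, 2**127 - 1),
    'uint256': (0, 2**256 - 1),
}

def _literal_comparison_allowed(literal_value, other_type):
    """Return True when a literal integer is inside `other_type`'s value range."""
    lo, hi = _ILLUSTRATIVE_BOUNDS[other_type]
    return lo <= literal_value <= hi

# Example: comparing the literal -1 against a uint256 is rejected, while 1 is fine.
assert _literal_comparison_allowed(1, 'uint256') is True
assert _literal_comparison_allowed(-1, 'uint256') is False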
| 53.781333 | 178 | 0.60058 |
cbd7a023e962af89cdd692b963f3fa1309d708c6 | 1,765 | py | Python | lord_gym/envs/actions.py | tonkolviktor/lord-gym | ca0e2f01e28581a3a62ae7d37fa24e729edcae6e | ["Apache-2.0"] | null | null | null | lord_gym/envs/actions.py | tonkolviktor/lord-gym | ca0e2f01e28581a3a62ae7d37fa24e729edcae6e | ["Apache-2.0"] | null | null | null | lord_gym/envs/actions.py | tonkolviktor/lord-gym | ca0e2f01e28581a3a62ae7d37fa24e729edcae6e | ["Apache-2.0"] | null | null | null |
import logging
from functools import reduce
from gym import spaces
from lord_gym.envs.game import Player
from lord_gym.envs.objects import IdleCitizen, WoodsMan, StoneMan, IronMan
logger = logging.getLogger(__name__)
class Action:
def __init__(self, subtype_index: int):
self.subtype_index = subtype_index
def do_action(self, player: Player):
raise NotImplementedError()
@classmethod
def get_subtypes(cls) -> int:
raise NotImplementedError()
class NoAction(Action):
def do_action(self, player: Player):
pass
@classmethod
def get_subtypes(cls) -> int:
return 1
def __str__(self):
return f"No-Action"
class ConvertCitizen(Action):
pairs = [(IdleCitizen, WoodsMan), (IdleCitizen, StoneMan), (IdleCitizen, IronMan)]
subtypes = pairs + [(pair[1], pair[0]) for pair in pairs]
def __init__(self, subtype_index: int):
(self.from_type, self.to_type) = self.subtypes[subtype_index]
def do_action(self, player: Player):
player.replace_object(self.from_type, self.to_type)
@classmethod
def get_subtypes(cls) -> int:
return len(cls.subtypes)
def __str__(self):
return f"Action: {self.from_type.__name__} -> {self.to_type.__name__}"
action_order = [NoAction, ConvertCitizen]
def get_action_space():
return spaces.Discrete(reduce(lambda x, y: x + y, map(lambda x: x.get_subtypes(), action_order))) # sum up subtypes
def resolve_action(action_orig) -> Action:
action = action_orig
for action_class in action_order:
if action < action_class.get_subtypes():
return action_class(action)
action -= action_class.get_subtypes()
raise ValueError(f"Unknown action: {action_orig}")
| 25.955882 | 120 | 0.690085 |
7d7bc4d9c2b086939ca90f2e88c29f5414722696 | 634 | py | Python | examples/signature.py | streetster/zte-config-utility | b0ee9a3dc52c074a6567a69532a527d484048901 | ["MIT"] | 5 | 2020-06-09T12:22:23.000Z | 2020-06-15T16:17:22.000Z | examples/signature.py | streetster/zte-config-utility | b0ee9a3dc52c074a6567a69532a527d484048901 | ["MIT"] | 1 | 2020-06-11T18:02:41.000Z | 2020-06-12T13:02:48.000Z | examples/signature.py | streetster/zte-config-utility | b0ee9a3dc52c074a6567a69532a527d484048901 | ["MIT"] | 2 | 2020-06-09T13:56:27.000Z | 2020-06-09T22:59:27.000Z |
"""Extract signature text from config.bin"""
import argparse
import zcu
def main():
"""the main function"""
parser = argparse.ArgumentParser(description='Extract signature from config.bin of ZTE Routers',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', type=argparse.FileType('rb'),
help='Configuration file (config.bin)')
args = parser.parse_args()
zcu.zte.read_header(args.infile)
signature = zcu.zte.read_signature(args.infile)
print(signature.decode('utf-8'))
if __name__ == '__main__':
main()
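# A hedged programmatic sketch built only from the two zcu calls exercised above
# (read_header / read_signature); the function name and path handling are
# illustrative, not part of zte-config-utility.
def read_signature_text(path):
    """Return the signature embedded in a config.bin file as text."""
    with open(path, 'rb') as infile:
        zcu.zte.read_header(infile)
        return zcu.zte.read_signature(infile).decode('utf-8')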
| 27.565217 | 100 | 0.656151 |
4352003618f087f2cf5003af8b0a7658455841ef | 4,265 | py | Python | neo/io/asciispiketrainio.py | neurodebian/python-neo | 9be7cd97572ce9c1ea5583eae700a99a05423c11 | ["BSD-3-Clause"] | 1 | 2020-08-08T21:39:41.000Z | 2020-08-08T21:39:41.000Z | neo/io/asciispiketrainio.py | michaelfsp/python-neo | 56ce3b6a3f6944a096279404c22a75166b905844 | ["BSD-3-Clause"] | null | null | null | neo/io/asciispiketrainio.py | michaelfsp/python-neo | 56ce3b6a3f6944a096279404c22a75166b905844 | ["BSD-3-Clause"] | null | null | null |
# encoding: utf-8
"""
Class for reading/writing SpikeTrains in a text file.
It is the simple case where different spiketrains are written line by line.
Supported : Read/Write
Author: sgarcia
"""
from .baseio import BaseIO
from ..core import *
from .tools import create_many_to_one_relationship
import numpy as np
import quantities as pq
import csv
import os
from numpy import newaxis
class AsciiSpikeTrainIO(BaseIO):
"""
    Class for reading/writing SpikeTrains in a text file.
Each Spiketrain is a line.
Usage:
>>> from neo import io
>>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
>>> seg = r.read_segment(lazy = False, cascade = True,)
>>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<SpikeTrain(array([ 3.89981604, 4.73258781, 0.608428 , 4.60246277, 1.23805797,
...
"""
is_readable = True
is_writable = True
supported_objects = [Segment , SpikeTrain]
readable_objects = [Segment]
writeable_objects = [Segment]
has_header = False
is_streameable = False
read_params = {
Segment : [
('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
('t_start' , { 'value' : 0., } ),
]
}
write_params = {
Segment : [
('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
]
}
name = None
extensions = [ 'txt' ]
mode = 'file'
def __init__(self , filename = None) :
"""
        This class reads/writes SpikeTrains in a text file.
Each row is a spiketrain.
**Arguments**
filename : the filename to read/write
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy = False,
cascade = True,
delimiter = '\t',
t_start = 0.*pq.s,
unit = pq.s,
):
"""
Arguments:
delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
t_start : time start of all spiketrain 0 by default
unit : unit of spike times, can be a str or directly a Quantities
"""
unit = pq.Quantity(1, unit)
seg = Segment(file_origin = os.path.basename(self.filename))
if not cascade:
return seg
f = open(self.filename, 'Ur')
for i,line in enumerate(f) :
all = line[:-1].split(delimiter)
if all[-1] == '': all = all[:-1]
if all[0] == '': all = all[1:]
if lazy:
spike_times = [ ]
t_stop = t_start
else:
spike_times = np.array(all).astype('f')
t_stop = spike_times.max()*unit
sptr = SpikeTrain(spike_times*unit, t_start=t_start, t_stop=t_stop)
if lazy:
sptr.lazy_shape = len(all)
sptr.annotate(channel_index = i)
seg.spiketrains.append(sptr)
f.close()
create_many_to_one_relationship(seg)
return seg
def write_segment(self, segment,
delimiter = '\t',
):
"""
Write SpikeTrain of a Segment in a txt file.
Each row is a spiketrain.
Arguments:
            segment : the segment to write. Only spiketrains will be written.
            delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
            Note that the t_start information is lost.
"""
f = open(self.filename, 'w')
for s,sptr in enumerate(segment.spiketrains) :
for ts in sptr :
f.write('%f%s'% (ts , delimiter) )
f.write('\n')
f.close()
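# A minimal, self-contained sketch of the text layout this IO class deals with:
# one spiketrain per line, spike times separated by the chosen delimiter (here
# '\t'), which is what write_segment() emits (with a trailing delimiter that
# read_segment() strips) and read_segment() parses. The file name is illustrative.
if __name__ == '__main__':
    example = [[0.1, 0.5, 1.2], [0.3, 0.9]]
    with open('example_spiketrains.txt', 'w') as txt:
        for train in example:
            txt.write('\t'.join('%f' % t for t in train) + '\n')
    with open('example_spiketrains.txt') as txt:
        parsed = [[float(x) for x in line.strip().split('\t')] for line in txt]
    assert parsed == example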
| 28.817568 | 115 | 0.481829 |
2f6416aa6ae6aca06100c02e27b48ed25762f6be | 59,772 | py | Python | cyclone/web.py | fiorix/niosted | 792214fee9a9e46ea531415721bfda080aa030e7 | ["OLDAP-2.5", "OLDAP-2.3"] | 1 | 2019-06-27T08:20:55.000Z | 2019-06-27T08:20:55.000Z | cyclone/web.py | fiorix/niosted | 792214fee9a9e46ea531415721bfda080aa030e7 | ["OLDAP-2.5", "OLDAP-2.3"] | null | null | null | cyclone/web.py | fiorix/niosted | 792214fee9a9e46ea531415721bfda080aa030e7 | ["OLDAP-2.5", "OLDAP-2.3"] | null | null | null |
#!/usr/bin/env jython
# coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cyclone.util import log
from cyclone import __version__, escape, template, httpserver
from net.fiorix.niosted import Factory
import base64
import binascii
import calendar
import Cookie
import cStringIO
import datetime
import email.utils
import functools
import gzip
import hashlib
import httplib
import hmac
import locale
import mimetypes
import os.path
import re
import stat
import sys
import time
import types
import urllib
import urlparse
import uuid
import xmlrpclib
import struct
class RequestHandler(object):
"""Subclass this class and define get() or post() to make a handler.
If you want to support more methods than the standard GET/HEAD/POST, you
should override the class variable SUPPORTED_METHODS in your
RequestHandler class.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PUT")
no_keep_alive = False
def __init__(self, application, request, transforms=None):
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = transforms or []
self.ui = _O((n, self._ui_method(m)) for n, m in
application.ui_methods.iteritems())
self.ui["modules"] = _O((n, self._ui_module(n, m)) for n, m in
application.ui_modules.iteritems())
self.clear()
self.request.connection.no_keep_alive = self.no_keep_alive
@property
def settings(self):
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called before the actual handler method.
Useful to override in a handler if you want a common bottleneck for
all of your requests.
"""
pass
def on_connection_close(self, *args, **kwargs):
"""Called when the connection is closed.
You may override this to clean up resources associated with
long-lived connections.
"""
pass
def clear(self):
"""Resets all headers and content for this response."""
self._headers = {
"Server": "CycloneServer/" + __version__,
"Content-Type": "text/html; charset=UTF-8",
}
if not self.request.supports_http_1_1():
if self.request.headers.get("Connection") == "Keep-Alive":
self.set_header("Connection", "Keep-Alive")
self._write_buffer = []
self._status_code = 200
def set_status(self, status_code):
"""Sets the status code for our response."""
assert status_code in httplib.responses
self._status_code = status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
if isinstance(value, datetime.datetime):
t = calendar.timegm(value.utctimetuple())
value = email.utils.formatdate(t, localtime=False, usegmt=True)
elif isinstance(value, int) or isinstance(value, long):
value = str(value)
else:
value = _utf8(value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
safe_value = re.sub(r"[\x00-\x1f]", " ", value)[:4000]
if safe_value != value:
raise ValueError("Unsafe header value %r", value)
self._headers[name] = value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we throw an HTTP 404 exception if it is missing.
The returned value is always unicode.
"""
values = self.request.arguments.get(name, None)
if values is None:
if default is self._ARG_DEFAULT:
raise HTTPError(404, "Missing argument %s" % name)
return default
# Get rid of any weird control chars
value = re.sub(r"[\x00-\x08\x0e-\x1f]", " ", values[-1])
value = _unicode(value)
if strip: value = value.strip()
return value
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.BaseCookie()
if "Cookie" in self.request.headers:
try:
self._cookies.load(self.request.headers["Cookie"])
except:
self.clear_all_cookies()
return self._cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if name in self.cookies:
return self.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None):
"""Sets the given cookie name/value with the given options."""
name = _utf8(name)
value = _utf8(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookies"):
self._new_cookies = []
new_cookie = Cookie.BaseCookie()
self._new_cookies.append(new_cookie)
new_cookie[name] = value
if domain:
new_cookie[name]["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
timestamp = calendar.timegm(expires.utctimetuple())
new_cookie[name]["expires"] = email.utils.formatdate(
timestamp, localtime=False, usegmt=True)
if path:
new_cookie[name]["path"] = path
return value
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name."""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self):
"""Deletes all the cookies the user sent with this request."""
for name in self.cookies.iterkeys():
self.clear_cookie(name)
def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the 'cookie_secret' setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use get_secure_cookie().
"""
timestamp = str(int(time.time()))
value = base64.b64encode(value)
signature = self._cookie_signature(name, value, timestamp)
value = "|".join([value, timestamp, signature])
return self.set_cookie(name, value, expires_days=expires_days, **kwargs)
def get_secure_cookie(self, name, include_name=True, value=None):
"""Returns the given signed cookie if it validates, or None.
In older versions of Tornado (0.1 and 0.2), we did not include the
name of the cookie in the cookie signature. To read these old-style
cookies, pass include_name=False to this method. Otherwise, all
attempts to read old-style cookies will fail (and you may log all
your users out whose cookies were written with a previous Tornado
version).
"""
if value is None: value = self.get_cookie(name)
if not value: return None
parts = value.split("|")
if len(parts) != 3: return None
if include_name:
signature = self._cookie_signature(name, parts[0], parts[1])
else:
signature = self._cookie_signature(parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
log.err("Invalid cookie signature %r" % value)
return None
timestamp = int(parts[1])
if timestamp < time.time() - 31 * 86400:
log.err("Expired cookie %r" % value)
return None
try:
return base64.b64decode(parts[0])
except:
return None
def _cookie_signature(self, *parts):
self.require_setting("cookie_secret", "secure cookies")
hash = hmac.new(self.application.settings["cookie_secret"],
digestmod=hashlib.sha1)
for part in parts: hash.update(part)
return hash.hexdigest()
def redirect(self, url, permanent=False):
"""Sends a redirect to the given (optionally relative) URL."""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
self.set_status(301 if permanent else 302)
# Remove whitespace
url = re.sub(r"[\x00-\x20]+", "", _utf8(url))
self.set_header("Location", urlparse.urljoin(self.request.uri, url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be text/javascript.
"""
assert not self._finished
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "text/javascript; charset=UTF-8")
chunk = _utf8(chunk)
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
for module in getattr(self, "_active_modules", {}).itervalues():
embed_part = module.embedded_javascript()
if embed_part: js_embed.append(_utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, basestring):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part: css_embed.append(_utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, basestring):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part: html_heads.append(_utf8(head_part))
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not path.startswith("/") and not path.startswith("http:"):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex('</body>')
html = html[:sloc] + js + '\n' + html[sloc:]
if js_embed:
js = '<script type="text/javascript">\n//<![CDATA[\n' + \
'\n'.join(js_embed) + '\n//]]>\n</script>'
sloc = html.rindex('</body>')
html = html[:sloc] + js + '\n' + html[sloc:]
if css_files:
paths = set()
for path in css_files:
if not path.startswith("/") and not path.startswith("http:"):
paths.add(self.static_url(path))
else:
paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index('</head>')
html = html[:hloc] + css + '\n' + html[hloc:]
if css_embed:
css = '<style type="text/css">\n' + '\n'.join(css_embed) + \
'\n</style>'
hloc = html.index('</head>')
html = html[:hloc] + css + '\n' + html[hloc:]
if html_heads:
hloc = html.index('</head>')
html = html[:hloc] + ''.join(html_heads) + '\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated string. To generate and write a template
as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.application.settings.get("template_path")
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
if not getattr(RequestHandler, "_templates", None):
RequestHandler._templates = {}
if template_path not in RequestHandler._templates:
RequestHandler._templates[template_path] = template.Loader(
template_path)
t = RequestHandler._templates[template_path].load(template_name)
args = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.application.reverse_url
)
args.update(self.ui)
args.update(kwargs)
return t.generate(**args)
def flush(self, include_footers=False):
"""Flushes the current output buffer to the nextwork."""
chunk = "".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._headers, chunk = transform.transform_first_chunk(
self._headers, chunk, include_footers)
headers = self._generate_headers()
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
headers = ""
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
if headers: self.request.write(headers)
return
if headers or chunk:
self.request.write(headers + chunk)
def notifyFinish(self):
return self.request.notifyFinish()
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
assert not self._finished
if chunk is not None: self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if self._status_code == 200 and self.request.method == "GET":
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
etag = '"%s"' % hasher.hexdigest()
inm = self.request.headers.get("If-None-Match")
if inm and inm.find(etag) != -1:
self._write_buffer = []
self.set_status(304)
else:
self.set_header("Etag", etag)
if "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
We also send the error HTML for the given error code as returned by
get_error_html. Override that method if you want custom error pages
for your application.
"""
if self._headers_written:
log.err("Cannot send error response after headers written")
if not self._finished:
self.finish()
return
self.clear()
self.set_status(status_code)
e = kwargs.get("exception")
if isinstance(e, HTTPAuthenticationRequired):
args = ",".join(['%s="%s"' % (k, v) for k, v in e.kwargs.items()])
self.set_header("WWW-Authenticate", "%s %s" % (e.auth_type, args))
message = self.get_error_html(status_code, **kwargs)
self.finish(message)
def get_error_html(self, status_code, **kwargs):
"""Override to implement custom error pages.
If this error was caused by an uncaught exception, the
exception object can be found in kwargs e.g. kwargs['exception']
"""
return "<html><title>%(code)d: %(message)s</title>" \
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": httplib.responses[status_code],
}
@property
def locale(self):
"""The local for the current session.
Determined by either get_user_locale, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or get_browser_locale, which uses the Accept-Language
header.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we use the Accept-Language header.
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from Accept-Language header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda (l, s): s, reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
Determined by either get_current_user, which you can override to
set the user based on, e.g., a cookie. If that method is not
overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the 'login_url' application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
"""
if not hasattr(self, "_xsrf_token"):
token = self.get_cookie("_xsrf")
if not token:
token = binascii.b2a_hex(uuid.uuid4().bytes)
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", token, expires_days=expires_days)
self._xsrf_token = token
return self._xsrf_token
def check_xsrf_cookie(self):
"""Verifies that the '_xsrf' cookie matches the '_xsrf' argument.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
"""
if self.request.headers.get("X-Requested-With") == "XMLHttpRequest":
return
token = self.get_argument("_xsrf", None)
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
if self.xsrf_token != token:
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML <input/> element to be included with all POST forms.
It defines the _xsrf input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the 'xsrf_cookies' application setting, you must include this
HTML within all of your HTML forms.
See check_xsrf_cookie() above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path):
"""Returns a static URL for the given relative static file path.
This method requires you set the 'static_path' setting in your
application (which specifies the root directory of your static
files).
We append ?v=<signature> to the returned URL, which makes our
static file handler set an infinite expiration header on the
returned content. The signature is based on the content of the
file.
If this handler has a "include_host" attribute, we include the
full host for every static URL, including the "http://". Set
this attribute for handlers whose output needs non-relative static
path names.
"""
self.require_setting("static_path", "static_url")
if not hasattr(RequestHandler, "_static_hashes"):
RequestHandler._static_hashes = {}
hashes = RequestHandler._static_hashes
if path not in hashes:
try:
f = open(os.path.join(
self.application.settings["static_path"], path))
hashes[path] = hashlib.md5(f.read()).hexdigest()
f.close()
except:
log.err("Could not open static file %r" % path)
hashes[path] = None
base = self.request.protocol + "://" + self.request.host \
if getattr(self, "include_host", False) else ""
static_url_prefix = self.settings.get('static_url_prefix', '/static/')
if hashes.get(path):
return base + static_url_prefix + path + "?v=" + hashes[path][:5]
else:
return base + static_url_prefix + path
def async_callback(self, callback, *args, **kwargs):
"""Wrap callbacks with this if they are used on asynchronous requests.
Catches exceptions and properly finishes the request.
"""
if callback is None:
return None
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception, e:
if self._headers_written:
log.err("Exception after headers written: %s" % str(e))
else:
self._handle_request_exception(e)
return wrapper
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
return self.application.reverse_url(name, *args)
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method == "POST" and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
self.prepare()
if not self._finished:
function = getattr(self, self.request.method.lower())
if callable(function):
function(*args, **kwargs)
#d = defer.maybeDeferred(function, *args, **kwargs)
#d.addCallback(self._execute_success)
#d.addErrback(self._execute_failure)
#self.notifyFinish().addCallback(self.on_connection_close)
except Exception, e:
log.err("unexpected request exception: %s" % e)
self._handle_request_exception(e)
def _execute_success(self, ign):
if self._auto_finish and not self._finished:
self.finish()
def _execute_failure(self, err):
self._handle_request_exception(err)
def _generate_headers(self):
lines = [self.request.version + " " + str(self._status_code) + " " +
httplib.responses[self._status_code]]
lines.extend(["%s: %s" % (n, v) for n, v in self._headers.iteritems()])
for cookie_dict in getattr(self, "_new_cookies", []):
for cookie in cookie_dict.values():
lines.append("Set-Cookie: " + cookie.OutputString(None))
return "\r\n".join(lines) + "\r\n\r\n"
def _log(self):
request_time = 1000.0 * self.request.request_time()
log.msg("%d %s %.2fms" % (self._status_code, self._request_summary(), request_time))
def _request_summary(self):
return self.request.method + " " + self.request.uri + " (" + \
self.request.remote_ip + ")"
def _handle_request_exception(self, e):
try:
if isinstance(e.value, (HTTPError, HTTPAuthenticationRequired)):
e = e.value
except:
pass
if isinstance(e, (HTTPError, HTTPAuthenticationRequired)):
#if e.log_message:
# format = "%d %s: " + e.log_message
# args = [e.status_code, self._request_summary()] + list(e.args)
# msg = lambda *args: format % args
# log.err(msg(*args))
if e.status_code not in httplib.responses:
log.err("Bad HTTP status code: %d" % e.status_code)
self.send_error(500, exception=e)
else:
self.send_error(e.status_code, exception=e)
else:
log.err(e)
log.err("Uncaught exception %s :: %r" % (self._request_summary(), self.request))
self.send_error(500, exception=e)
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
class XmlrpcRequestHandler(RequestHandler):
FAILURE = 8002
NOT_FOUND = 8001
separator = "."
allowNone = False
def post(self):
self._auto_finish = False
self.set_header("Content-Type", "text/xml")
try:
args, functionPath = xmlrpclib.loads(self.request.body)
except Exception, e:
f = xmlrpclib.Fault(self.FAILURE, "can't deserialize input: %s" % e)
self._cbRender(f)
else:
try:
function = self._getFunction(functionPath)
except xmlrpclib.Fault, f:
self._cbRender(f)
else:
d = defer.maybeDeferred(function, *args)
d.addCallback(self._cbRender)
d.addErrback(self._ebRender)
def _getFunction(self, functionPath):
if functionPath.find(self.separator) != -1:
prefix, functionPath = functionPath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None:
raise xmlrpclib.Fault(self.NOT_FOUND,
"no such subHandler %s" % prefix)
return self._getFunction(functionPath)
f = getattr(self, "xmlrpc_%s" % functionPath, None)
if f is None:
raise xmlrpclib.Fault(self.NOT_FOUND,
"function %s not found" % functionPath)
elif not callable(f):
raise xmlrpclib.Fault(self.NOT_FOUND,
"function %s not callable" % functionPath)
else:
return f
def _cbRender(self, result):
if not isinstance(result, xmlrpclib.Fault):
result = (result,)
try:
s = xmlrpclib.dumps(result,
methodresponse=True, allow_none=self.allowNone)
except Exception, e:
            f = xmlrpclib.Fault(self.FAILURE, "can't serialize output: %s" % e)
s = xmlrpclib.dumps(f,
methodresponse=True, allow_none=self.allowNone)
self.finish(s)
def _ebRender(self, failure):
if isinstance(failure.value, xmlrpclib.Fault):
s = failure.value
else:
s = xmlrpclib.Fault(self.FAILURE, "error")
self.finish(xmlrpclib.dumps(s, methodresponse=True))
class JsonrpcRequestHandler(RequestHandler):
def post(self, *args):
self._auto_finish = False
try:
req = escape.json_decode(self.request.body)
method = req["method"]
assert isinstance(method, types.StringTypes), type(method)
params = req["params"]
assert isinstance(params, (types.ListType, types.TupleType)), type(params)
jsonid = req["id"]
assert isinstance(jsonid, types.IntType), type(jsonid)
except Exception, e:
log.err("bad request: %s" % str(e))
raise HTTPError(400)
function = getattr(self, "jsonrpc_%s" % method, None)
if callable(function):
args = list(args) + params
d = defer.maybeDeferred(function, *args)
d.addBoth(self._cbResult, jsonid)
else:
self._cbResult(AttributeError("method not found: %s" % method), jsonid)
def _cbResult(self, result, jsonid):
if isinstance(result, failure.Failure):
error = str(result.value)
result = None
else:
error = None
json_data = escape.json_encode({"result":result, "error":error, "id":jsonid})
self.finish(json_data)
class WebSocketHandler(RequestHandler):
def __init__(self, application, request):
RequestHandler.__init__(self, application, request)
self.transport = request.connection.transport
self._wsbuffer = ""
self.nonce = None
self.k1 = None
self.k2 = None
self._postheader = False
def headersReceived(self):
pass
def connectionMade(self, *args, **kwargs):
pass
def messageReceived(self, message):
pass
def sendMessage(self, message):
if isinstance(message, dict):
message = escape.json_encode(message)
if isinstance(message, unicode):
message = message.encode("utf-8")
assert isinstance(message, str)
self.transport.write("\x00" + message + "\xff")
def _handle_request_exception(self, e):
if isinstance(e, HTTPError):
self.transport.loseConnection()
else:
log.err(e)
log.err("Uncaught exception %s :: %r" % (self._request_summary(), self.request))
self.transport.loseConnection()
def _rawDataReceived(self, data):
if len(data) == 8 and self._postheader == True:
self.nonce = data.strip()
token = self._calculate_token(self.k1, self.k2, self.nonce)
self.transport.write(
"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Server: CycloneServer/"+__version__+"\r\n"
"Sec-WebSocket-Origin: " + self.request.headers["Origin"] + "\r\n"
"Sec-WebSocket-Location: ws://" + self.request.host +
self.request.path + "\r\n\r\n"+token+"\r\n")
self._postheader = False
self.flush()
return
        elif (ord(data[0]) & 0x80) == 0x80 and self._protocol < 76:
raise Exception("Length-encoded format not yet supported")
try:
idx = data.find("\xff")
message = data[1:idx]
self._wsbuffer = data[idx+1:]
except:
log.err("Invalid WebSocket Message: %s" % repr(data))
else:
try:
self.messageReceived(message)
except Exception, e:
self._handle_request_exception(e)
def _execute(self, transforms, *args, **kwargs):
self.request.connection.setRawMode()
self.request.connection.rawDataReceived = self._rawDataReceived
self.notifyFinish().addCallback(self.connectionLost)
try:
assert self.request.headers["Upgrade"] == "WebSocket"
assert self.request.headers["Connection"] == "Upgrade"
assert self.request.headers.get("Origin") is not None
except:
message = "Expected WebSocket Headers"
self.transport.write("HTTP/1.1 403 Forbidden\r\nContent-Length: " +
str(len(message)) + "\r\n\r\n" + message)
return self.transport.loseConnection()
else:
try:
self.headersReceived()
except Exception, e:
return self._handle_request_exception(e)
if self.request.headers.has_key('Sec-Websocket-Key1') == False or \
self.request.headers.has_key('Sec-Websocket-Key2') == False:
log.msg('Using old ws spec (draft 75)')
self.transport.write(
"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Server: CycloneServer/"+__version__+"\r\n"
"WebSocket-Origin: " + self.request.headers["Origin"] + "\r\n"
"WebSocket-Location: ws://" + self.request.host +
self.request.path + "\r\n\r\n")
self._protocol = 75
else:
log.msg('Using ws draft 76 header exchange')
self.k1 = self.request.headers["Sec-WebSocket-Key1"]
self.k2 = self.request.headers["Sec-WebSocket-Key2"]
self._protocol = 76
self._postheader = True
self.connectionMade(*args, **kwargs)
def _calculate_token(self, k1, k2, k3):
token = struct.pack('>ii8s', self._filterella(k1), self._filterella(k2), k3)
return hashlib.md5(token).digest()
def _filterella(self, w):
nums = []
spaces = 0
for l in w:
if l.isdigit(): nums.append(l)
if l.isspace(): spaces = spaces + 1
x = int(''.join(nums))/spaces
return x
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call self.finish()
to finish the HTTP request. Without this decorator, the request is
automatically finished when the get() or post() method returns.
class MyRequestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to '/foo' would redirect to '/foo/' with this
decorator. Your request handler mapping should use a regular expression
like r'/foo/?' in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method == "GET":
uri = self.request.path + "/"
if self.request.query: uri += "?" + self.request.query
self.redirect(uri)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class Application(Factory):
def buildProtocol(self):
return httpserver.HTTPConnection(self)
def __init__(self, handlers=None, default_host="", transforms=None, **settings):
if transforms is None:
self.transforms = []
if settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
self.transforms.append(ChunkedTransferEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = _O(settings)
self.ui_modules = {}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
handlers = [
(re.escape(static_url_prefix) + r"(.*)", StaticFileHandler,
dict(path=path)),
(r"/(favicon\.ico)", StaticFileHandler, dict(path=path)),
(r"/(robots\.txt)", StaticFileHandler, dict(path=path)),
] + handlers
if handlers: self.add_handlers(".*$", handlers)
# Automatically reload modified modules
#if self.settings.get("debug") and not wsgi:
#import autoreload
#autoreload.start()
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list."""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if type(spec) is type(()):
assert len(spec) in (2, 3)
pattern = spec[0]
handler = spec[1]
if len(spec) == 3:
kwargs = spec[2]
else:
kwargs = {}
spec = URLSpec(pattern, handler, kwargs)
handlers.append(spec)
if spec.name:
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
"""Adds the given OutputTransform to our transform list."""
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = request.host.lower().split(':')[0]
for pattern, handlers in self.handlers:
if pattern.match(host):
return handlers
# Look for default host if not behind load balancer (for debugging)
if "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
return handlers
return None
def _load_ui_methods(self, methods):
if type(methods) is types.ModuleType:
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
            for m in methods: self._load_ui_methods(m)
else:
for name, fn in methods.iteritems():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if type(modules) is types.ModuleType:
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
            for m in modules: self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.iteritems():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
"""Called by HTTPServer to execute the request."""
transforms = [t(request) for t in self.transforms]
handler = None
args = []
handlers = self._get_host_handlers(request)
if not handlers:
handler = RedirectHandler(
request, "http://" + self.default_host + "/")
else:
for spec in handlers:
match = spec.regex.match(request.path)
if match:
handler = spec.handler_class(self, request, **spec.kwargs)
args = match.groups()
break
if not handler:
handler = ErrorHandler(self, request, 404)
# In debug mode, re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if self.settings.get("debug"):
RequestHandler._templates = None
RequestHandler._static_hashes = {}
handler._execute(transforms, *args)
return handler
def reverse_url(self, name, *args):
"""Returns a URL path for handler named `name`
The handler must be added to the application as a named URLSpec
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response."""
def __init__(self, status_code, log_message=None, *args):
self.status_code = status_code
self.log_message = log_message
self.args = args
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code, httplib.responses[self.status_code])
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
class HTTPAuthenticationRequired(HTTPError):
"""An exception that will turn into an HTTP Authentication Required response"""
def __init__(self, auth_type="Basic", realm="Restricted Access", **kwargs):
self.status_code = 401
self.log_message = None
self.auth_type = auth_type
self.kwargs = kwargs
self.kwargs["realm"] = realm
class ErrorHandler(RequestHandler):
"""Generates an error response with status_code for all requests."""
def __init__(self, application, request, status_code):
RequestHandler.__init__(self, application, request)
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument "url" to the handler, e.g.:
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def __init__(self, application, request, url, permanent=True):
RequestHandler.__init__(self, application, request)
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
To map a path to this handler for a static data directory /var/www,
you would add a line to your application like:
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The local root directory of the content should be passed as the "path"
argument to the handler.
To support aggressive browser caching, if the argument "v" is given
with the path, we set an infinite HTTP expiration header. So, if you
want browsers to cache a file indefinitely, send them to, e.g.,
/static/images/myimage.png?v=xxx.
"""
def __init__(self, application, request, path):
RequestHandler.__init__(self, application, request)
self.root = os.path.abspath(path) + "/"
def head(self, path):
self.get(path, include_body=False)
def get(self, path, include_body=True):
abspath = os.path.abspath(os.path.join(self.root, path))
if not abspath.startswith(self.root):
raise HTTPError(403, "%s is not in root static directory", path)
if not os.path.exists(abspath):
raise HTTPError(404)
if not os.path.isfile(abspath):
raise HTTPError(403, "%s is not a file", path)
stat_result = os.stat(abspath)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
if "v" in self.request.arguments:
self.set_header("Expires", datetime.datetime.utcnow() + \
datetime.timedelta(days=365*10))
self.set_header("Cache-Control", "max-age=" + str(86400*365*10))
else:
self.set_header("Cache-Control", "public")
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
if not include_body:
return
self.set_header("Content-Length", stat_result[stat.ST_SIZE])
file = open(abspath, "r")
try:
self.write(file.read())
finally:
file.close()
class FallbackHandler(RequestHandler):
"""A RequestHandler that wraps another HTTP server callback.
The fallback is a callable object that accepts an HTTPRequest,
such as an Application or tornado.wsgi.WSGIContainer. This is most
useful to use both tornado RequestHandlers and WSGI in the same server.
Typical usage:
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def __init__(self, app, request, fallback):
RequestHandler.__init__(self, app, request)
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
A new transform instance is created for every request. See the
ChunkedTransferEncoding example below if you want to implement a
new Transform.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, headers, chunk, finishing):
return headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
"""
CONTENT_TYPES = set([
"text/plain", "text/html", "text/css", "text/xml",
"application/x-javascript", "application/xml", "application/atom+xml",
"text/javascript", "application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = request.supports_http_1_1() and \
"gzip" in request.headers.get("Accept-Encoding", "")
def transform_first_chunk(self, headers, chunk, finishing):
if self._gzipping:
ctype = headers.get("Content-Type", "").split(";")[0]
self._gzipping = (ctype in self.CONTENT_TYPES) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
(finishing or "Content-Length" not in headers) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = cStringIO.StringIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
self._gzip_pos = 0
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
if self._gzip_pos > 0:
chunk = chunk[self._gzip_pos:]
self._gzip_pos += len(chunk)
return chunk
class ChunkedTransferEncoding(OutputTransform):
"""Applies the chunked transfer encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
"""
def __init__(self, request):
self._chunking = request.supports_http_1_1()
def transform_first_chunk(self, headers, chunk, finishing):
if self._chunking:
# No need to chunk the output if a Content-Length is specified
if "Content-Length" in headers or "Transfer-Encoding" in headers:
self._chunking = False
else:
headers["Transfer-Encoding"] = "chunked"
chunk = self.transform_chunk(chunk, finishing)
return headers, chunk
def transform_chunk(self, block, finishing):
if self._chunking:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
if block:
block = ("%x" % len(block)) + "\r\n" + block + "\r\n"
if finishing:
block += "0\r\n\r\n"
return block
def authenticated(method):
"""Decorate methods with this to require that the user be logged in."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method == "GET":
url = self.get_login_url()
if "?" not in url:
url += "?" + urllib.urlencode(dict(next=self.request.uri))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class UIModule(object):
"""A UI re-usable, modular unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.current_user = handler.current_user
self.locale = handler.locale
def render(self, *args, **kwargs):
raise NotImplementedError()
def embedded_javascript(self):
"""Returns a JavaScript string that will be embedded in the page."""
return None
def javascript_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def embedded_css(self):
"""Returns a CSS string that will be embedded in the page."""
return None
def css_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def html_head(self):
"""Returns a CSS string that will be put in the <head/> element"""
return None
def render_string(self, path, **kwargs):
return self.handler.render_string(path, **kwargs)
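# Hedged usage sketch (class and template names are hypothetical): a UIModule
# subclass typically renders a template through render_string() and is then
# referenced from page templates.
#
#     class Entry(UIModule):
#         def render(self, entry, show_comments=False):
#             return self.render_string(
#                 "module-entry.html", entry=entry, show_comments=show_comments)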
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler_class, kwargs={}, name=None):
"""Creates a URLSpec.
Parameters:
pattern: Regular expression to be matched. Any groups in the regex
will be passed in to the handler's get/post/etc methods as
arguments.
handler_class: RequestHandler subclass to be invoked.
kwargs (optional): A dictionary of additional arguments to be passed
to the handler's constructor.
name (optional): A name for this handler. Used by
Application.reverse_url.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
self.handler_class = handler_class
self.kwargs = kwargs
self.name = name
self._path, self._group_count = self._find_groups()
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
return self._path % tuple([str(a) for a in args])
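# Illustration, mirroring the _find_groups docstring above (SomeHandler is a
# hypothetical RequestHandler subclass):
#     spec = URLSpec(r"/([0-9]{4})/([a-z-]+)/", SomeHandler)
#     spec._find_groups()        # -> ('/%s/%s/', 2)
#     spec.reverse(2014, "blog") # -> '/2014/blog/'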
url = URLSpec
def _utf8(s):
if isinstance(s, unicode):
return s.encode("utf-8")
assert isinstance(s, str)
return s
def _unicode(s):
if isinstance(s, str):
try:
return s.decode("utf-8")
except UnicodeDecodeError:
raise HTTPError(400, "Non-utf8 argument")
assert isinstance(s, unicode)
return s
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
class _O(dict):
"""Makes a dictionary behave like an object."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
return None
#raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
| 38.217391
| 92
| 0.591146
|
92951d5fc02a6cecd20270def94c217e2216e2a9
| 4,186
|
py
|
Python
|
notebook/util.py
|
hampen2929/CenterTrack
|
7ccb4a07637785df4a450f312845bc37b2e53ac0
|
[
"MIT"
] | 3
|
2020-04-17T22:19:31.000Z
|
2022-01-07T22:01:16.000Z
|
notebook/util.py
|
hampen2929/CenterTrack
|
7ccb4a07637785df4a450f312845bc37b2e53ac0
|
[
"MIT"
] | null | null | null |
notebook/util.py
|
hampen2929/CenterTrack
|
7ccb4a07637785df4a450f312845bc37b2e53ac0
|
[
"MIT"
] | 1
|
2021-05-13T04:47:49.000Z
|
2021-05-13T04:47:49.000Z
|
import os
import json
from glob import glob
# court_label_dict = {
# '0': 'center',
# '1': 'singles_upper_right',
# '2': 'singles_upper_left',
# '3': 'singles_lower_left',
# '4': 'singles_lower_right',
# '5': 'doubles_upper_right',
# '6': 'doubles_upper_left',
# '7': 'doubles_lower_left',
# '8': 'doubles_lower_right',
# '9': 'service_upper_right',
# '10': 'service_upper_center',
# '11': 'service_upper_left',
# '12': 'service_lower_left',
# '13': 'service_lower_center',
# '14': 'service_lower_right',
# }
category_id_dict = {
'person': 1,
'ball': 2,
'doubles_upper_right': 3,
'doubles_upper_left': 3,
'doubles_lower_left': 3,
'doubles_lower_right': 3,
'singles_upper_right': 4,
'singles_upper_left': 4,
'singles_lower_left': 4,
'singles_lower_right': 4,
'service_upper_right': 5,
'service_upper_center': 5,
'service_upper_left': 5,
'service_lower_left': 5,
'service_lower_center': 5,
'service_lower_right': 5,
'center': 0,
}
def load_json(data_path):
f = open(data_path, 'r')
jsonData = json.load(f)
f.close()
return jsonData
def save_json(label, save_path):
    with open(save_path, "w") as f:
        json.dump(label, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
def get_json_file_paths(data_dir):
data_path = os.path.join(data_dir, '*.json')
label_file_paths = glob(data_path)
label_file_paths.sort()
return label_file_paths
def get_info():
file_dict = {
'description': 'dummy',
'url': 'dummy',
'version': 'dummy',
'year': 2045,
'contributor': 'dummy',
'date_created': '1993/01/24'
}
return file_dict
def get_licenses():
file_dict = {
'url': 'dummy',
'id': 1,
'name': 'dummy'}
return file_dict
def get_images(label_file, image_id, file_name=None):
file_dict = {
'license': 1,
'file_name': None,
'coco_url': 'dummy',
'height': None,
'width': None,
'date_captured': 'dummy',
'flickr_url': 'dummy',
'id': 99999
}
if file_name is None:
file_name = label_file['imagePath']
height = label_file['imageHeight']
width = label_file['imageWidth']
file_dict['file_name'] = file_name
file_dict['height'] = height
file_dict['width'] = width
file_dict['id'] = image_id
return file_dict
def get_annotations(shape, image_id, annotation_id, target_labels=None):
file_dict = {
'segmentation': [[]],
'area': None,
'iscrowd': 0,
'image_id': 99999,
'bbox': [],
'category_id': None,
'id': 999
}
point = shape['points']
xmin, ymin = point[0]
xmax, ymax = point[1]
width = xmax - xmin
height = ymax - ymin
area = width * height
bbox = [xmin, ymin, width, height]
label = shape['label']
if isinstance(label, str):
if label.isdecimal():
category_id = int(label)
else:
category_id = category_id_dict[label]
else:
msg = 'label must be str not {}'.format(type(label))
raise ValueError(msg)
file_dict['area'] = area
file_dict['bbox'] = bbox
file_dict['category_id'] = category_id
file_dict['image_id'] = image_id
file_dict['id'] = annotation_id
return file_dict
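# Hedged example (values are made up): a labelme-style rectangle shape is
# converted into a COCO-style annotation dict.
#     shape = {'points': [[10.0, 20.0], [110.0, 70.0]], 'label': 'person'}
#     ann = get_annotations(shape, image_id=1, annotation_id=7)
#     ann['bbox']        # -> [10.0, 20.0, 100.0, 50.0]  (x, y, width, height)
#     ann['area']        # -> 5000.0
#     ann['category_id'] # -> 1  (category_id_dict['person'])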
def get_categories():
# categories = [
# {'supercategory': 'court_edge', 'id': 1, 'name': 'court_edge'},
# {'supercategory': 'court_edge', 'id': 2, 'name': 'doubles_lower_left'},
# {'supercategory': 'court_edge', 'id': 3, 'name': 'doubles_lower_right'},
# {'supercategory': 'court_edge', 'id': 4, 'name': 'doubles_upper_left'},
# {'supercategory': 'court_edge', 'id': 5, 'name': 'doubles_upper_right'}
# ]
categories = [
{'supercategory': 'person', 'id': 1, 'name': 'person'},
{'supercategory': 'ball', 'id': 2, 'name': 'ball'},
{'supercategory': 'court', 'id': 3, 'name': 'court_edge'},
]
return categories
| 25.680982
| 93
| 0.571906
|
fde28e832f3dfb134bd0c9015af5cc225500362a
| 20,215
|
py
|
Python
|
datalad/distribution/tests/test_uninstall.py
|
driusan/datalad
|
97f990caff182547845e1096a4044d9cd17e6a31
|
[
"MIT"
] | null | null | null |
datalad/distribution/tests/test_uninstall.py
|
driusan/datalad
|
97f990caff182547845e1096a4044d9cd17e6a31
|
[
"MIT"
] | null | null | null |
datalad/distribution/tests/test_uninstall.py
|
driusan/datalad
|
97f990caff182547845e1096a4044d9cd17e6a31
|
[
"MIT"
] | null | null | null |
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test uninstall action
"""
from datalad.tests.utils import known_failure_v6
from datalad.tests.utils import known_failure_direct_mode
import os
from os.path import join as opj, split as psplit
from os.path import exists, lexists
from os.path import realpath
from os.path import isdir
from glob import glob
from datalad.api import uninstall
from datalad.api import drop
from datalad.api import remove
from datalad.api import install
from datalad.api import create
from datalad.support.exceptions import InsufficientArgumentsError
from datalad.support.exceptions import IncompleteResultsError
from datalad.tests.utils import ok_
from datalad.tests.utils import eq_
from datalad.tests.utils import with_testrepos
from datalad.tests.utils import SkipTest
from datalad.tests.utils import assert_raises
from datalad.tests.utils import assert_status
from datalad.tests.utils import assert_in
from datalad.tests.utils import assert_result_count
from datalad.tests.utils import assert_result_values_cond
from datalad.tests.utils import ok_file_under_git
from datalad.tests.utils import ok_clean_git
from datalad.tests.utils import with_tempfile
from datalad.tests.utils import with_tree
from datalad.tests.utils import create_tree
from datalad.tests.utils import skip_if_no_network
from datalad.tests.utils import use_cassette
from datalad.tests.utils import usecase
from datalad.utils import chpwd
from datalad.utils import _path_
from datalad.support.external_versions import external_versions
from ..dataset import Dataset
@with_tempfile()
def test_safetynet(path):
ds = Dataset(path).create()
os.makedirs(opj(ds.path, 'deep', 'down'))
for p in (ds.path, opj(ds.path, 'deep'), opj(ds.path, 'deep', 'down')):
with chpwd(p):
# will never remove PWD, or anything outside the dataset
for target in (ds.path, os.curdir, os.pardir, opj(os.pardir, os.pardir)):
assert_status(
('error', 'impossible'),
uninstall(path=target, on_failure='ignore'))
@with_tempfile()
def test_uninstall_uninstalled(path):
# goal oriented error reporting. here:
# nothing installed, any removal was already a success before it started
ds = Dataset(path)
assert_status('error', ds.drop(on_failure="ignore"))
assert_status('error', ds.uninstall(on_failure='ignore'))
assert_status('notneeded', ds.remove())
@with_tempfile()
@known_failure_direct_mode #FIXME
def test_clean_subds_removal(path):
ds = Dataset(path).create()
subds1 = ds.create('one')
subds2 = ds.create('two')
eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['one', 'two'])
ok_clean_git(ds.path)
# now kill one
res = ds.remove('one', result_xfm=None)
# subds1 got uninstalled, and ds got the removal of subds1 saved
assert_result_count(res, 1, path=subds1.path, action='uninstall', status='ok')
assert_result_count(res, 1, path=subds1.path, action='remove', status='ok')
assert_result_count(res, 1, path=ds.path, action='save', status='ok')
ok_(not subds1.is_installed())
ok_clean_git(ds.path)
# two must remain
eq_(ds.subdatasets(result_xfm='relpaths'), ['two'])
# one is gone
assert(not exists(subds1.path))
# and now again, but this time remove something that is not installed
ds.create('three')
ds.save()
eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['three', 'two'])
ds.uninstall('two')
ok_clean_git(ds.path)
eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['three', 'two'])
ok_(not subds2.is_installed())
assert(exists(subds2.path))
res = ds.remove('two', result_xfm='datasets')
ok_clean_git(ds.path)
# subds2 was already uninstalled, now ds got the removal of subds2 saved
assert(not exists(subds2.path))
eq_(ds.subdatasets(result_xfm='relpaths'), ['three'])
eq_(res, [subds2, ds])
@with_testrepos('.*basic.*', flavors=['clone'])
def test_uninstall_invalid(path):
ds = Dataset(path).create(force=True)
for method in (uninstall, remove, drop):
assert_raises(InsufficientArgumentsError, method)
# refuse to touch stuff outside the dataset
assert_status('error', method(dataset=ds, path='..', on_failure='ignore'))
# same if it doesn't exist, for consistency
assert_status('error', method(dataset=ds, path='../madeupnonexist', on_failure='ignore'))
@with_testrepos('basic_annex', flavors=['clone'])
def test_uninstall_annex_file(path):
ds = Dataset(path)
ok_(ds.is_installed())
ok_file_under_git(ds.repo.path, 'test-annex.dat', annexed=True)
ds.repo.get('test-annex.dat')
ok_(ds.repo.file_has_content('test-annex.dat'))
# remove file's content:
res = ds.drop(path='test-annex.dat', result_xfm='paths')
# test it happened:
ok_(not ds.repo.file_has_content('test-annex.dat'))
ok_file_under_git(ds.repo.path, 'test-annex.dat', annexed=True)
# test result:
eq_(res, [opj(ds.path, 'test-annex.dat')])
ds.repo.get('test-annex.dat')
# remove file:
ds.remove(path='test-annex.dat')
assert_raises(AssertionError, ok_file_under_git, ds.repo.path, 'test-annex.dat',
annexed=True)
assert_raises(AssertionError, ok_file_under_git, ds.repo.path, 'test-annex.dat',
annexed=False)
ok_(not exists(opj(path, 'test-annex.dat')))
@known_failure_v6 # FIXME: git files end up in annex, therefore drop result is different
@with_testrepos('.*basic.*', flavors=['clone'])
def test_uninstall_git_file(path):
ds = Dataset(path)
ok_(ds.is_installed())
ok_(exists(opj(path, 'INFO.txt')))
ok_file_under_git(ds.repo.path, 'INFO.txt')
# drop file in Git in an annex repo
# regardless of the type of repo this is 'notneeded'...
    # it is less about education than about "can we
    # get the content back?", and for a file in Git we can
assert_result_count(
ds.drop(path='INFO.txt'),
1,
status='notneeded',
message="no annex'ed content")
res = ds.uninstall(path="INFO.txt", on_failure='ignore')
assert_result_count(
res, 1,
status='impossible',
message='can only uninstall datasets (consider the `drop` command)')
# remove the file:
res = ds.remove(path='INFO.txt', result_xfm='paths',
result_filter=lambda x: x['action'] == 'remove')
assert_raises(AssertionError, ok_file_under_git, ds.repo.path, 'INFO.txt')
ok_(not exists(opj(path, 'INFO.txt')))
eq_(res, ['INFO.txt'])
@known_failure_v6 #FIXME Note: Failure seems to somehow be depend on PY2/PY3
@with_testrepos('submodule_annex', flavors=['local'])
@with_tempfile(mkdir=True)
def test_uninstall_subdataset(src, dst):
ds = install(dst, source=src, recursive=True)
ok_(ds.is_installed())
known_subdss = ds.subdatasets(result_xfm='datasets')
for subds in ds.subdatasets(result_xfm='datasets'):
ok_(subds.is_installed())
annexed_files = subds.repo.get_annexed_files()
subds.repo.get(annexed_files)
# drop data of subds:
res = ds.drop(path=subds.path, result_xfm='paths')
ok_(all([opj(subds.path, f) in res for f in annexed_files]))
ok_(all([not i for i in subds.repo.file_has_content(annexed_files)]))
# subdataset is still known
assert_in(subds.path, ds.subdatasets(result_xfm='paths'))
eq_(ds.subdatasets(result_xfm='datasets'), known_subdss)
for subds in ds.subdatasets(result_xfm='datasets'):
# uninstall subds itself:
if os.environ.get('DATALAD_TESTS_DATALADREMOTE') \
and external_versions['git'] < '2.0.9':
raise SkipTest(
"Known problem with GitPython. See "
"https://github.com/gitpython-developers/GitPython/pull/521")
res = ds.uninstall(path=subds.path, result_xfm='datasets')
eq_(res[0], subds)
ok_(not subds.is_installed())
# just a deinit must not remove the subdataset registration
eq_(ds.subdatasets(result_xfm='datasets'), known_subdss)
# mountpoint of subdataset should still be there
ok_(exists(subds.path))
@with_tree({
'deep': {
'dir': {
'keep': 'keep1', 'kill': 'kill1'}},
'keep': 'keep2',
'kill': 'kill2'})
@known_failure_direct_mode #FIXME
def test_uninstall_multiple_paths(path):
ds = Dataset(path).create(force=True, save=False)
subds = ds.create('deep', force=True)
subds.add('.', recursive=True)
ok_clean_git(subds.path)
# needs to be able to add a combination of staged files, modified submodule,
# and untracked files
ds.add('.', recursive=True)
ok_clean_git(ds.path)
# drop content of all 'kill' files
topfile = 'kill'
deepfile = opj('deep', 'dir', 'kill')
# use a tuple not a list! should also work
ds.drop((topfile, deepfile), check=False)
ok_clean_git(ds.path)
files_left = glob(opj(ds.path, '*', '*', '*')) + glob(opj(ds.path, '*'))
ok_(all([f.endswith('keep') for f in files_left if exists(f) and not isdir(f)]))
ok_(not ds.repo.file_has_content(topfile))
ok_(not subds.repo.file_has_content(opj(*psplit(deepfile)[1:])))
# remove handles for all 'kill' files
ds.remove([topfile, deepfile], check=False)
ok_clean_git(ds.path)
files_left = glob(opj(ds.path, '*', '*', '*')) + glob(opj(ds.path, '*'))
ok_(all([f.endswith('keep') for f in files_left if exists(f) and not isdir(f)]))
ok_(not any([f.endswith(topfile) for f in files_left]))
@with_tempfile()
def test_uninstall_dataset(path):
ds = Dataset(path)
ok_(not ds.is_installed())
ds.create()
ok_(ds.is_installed())
ok_clean_git(ds.path)
# would only drop data
ds.drop()
# actually same as this, for cmdline compat reasons
ds.drop(path=[])
ok_clean_git(ds.path)
# removing entire dataset, uninstall will refuse to act on top-level
# datasets
assert_raises(IncompleteResultsError, ds.uninstall)
ds.remove()
# completely gone
ok_(not ds.is_installed())
ok_(not exists(ds.path))
@with_tree({'one': 'test', 'two': 'test', 'three': 'test2'})
@known_failure_direct_mode #FIXME
def test_remove_file_handle_only(path):
ds = Dataset(path).create(force=True)
ds.add(os.curdir)
ok_clean_git(ds.path)
# make sure there is any key
ok_(len(ds.repo.get_file_key('one')))
# both files link to the same key
eq_(ds.repo.get_file_key('one'),
ds.repo.get_file_key('two'))
rpath_one = realpath(opj(ds.path, 'one'))
eq_(rpath_one, realpath(opj(ds.path, 'two')))
path_two = opj(ds.path, 'two')
ok_(exists(path_two))
# remove one handle, should not affect the other
ds.remove('two', check=False, message="custom msg")
eq_(ds.repo.repo.head.commit.message.rstrip(), "custom msg")
eq_(rpath_one, realpath(opj(ds.path, 'one')))
ok_(exists(rpath_one))
ok_(not exists(path_two))
# remove file without specifying the dataset -- shouldn't fail
with chpwd(path):
remove('one', check=False)
ok_(not exists("one"))
# and we should be able to remove without saving
ds.remove('three', check=False, save=False)
ok_(ds.repo.is_dirty())
@with_tree({'deep': {'dir': {'test': 'testcontent'}}})
@known_failure_direct_mode #FIXME
def test_uninstall_recursive(path):
ds = Dataset(path).create(force=True)
subds = ds.create('deep', force=True)
# we add one file, but we get a response for the requested
# directory too
res = subds.add('.')
assert_result_count(res, 1, action='add', status='ok', type='file')
assert_result_count(res, 1, action='save', status='ok', type='dataset')
# save all -> all clean
ds.save(recursive=True)
ok_clean_git(subds.path)
ok_clean_git(ds.path)
# now uninstall in subdataset through superdataset
target_fname = opj('deep', 'dir', 'test')
# sane starting point
ok_(exists(opj(ds.path, target_fname)))
# doesn't have the minimum number of copies for a safe drop
res = ds.drop(target_fname, recursive=True, on_failure='ignore')
assert_status('error', res)
assert_result_values_cond(
res, 'message',
lambda x: "configured minimum number of copies not found" in x or
"Could only verify the existence of 0 out of 1 necessary copies" in x
)
# this should do it
ds.drop(target_fname, check=False, recursive=True)
# link is dead
lname = opj(ds.path, target_fname)
ok_(not exists(lname))
# entire hierarchy saved
ok_clean_git(subds.path)
ok_clean_git(ds.path)
# now same with actual handle removal
# content is dropped already, so no checks in place anyway
ds.remove(target_fname, check=True, recursive=True)
ok_(not exists(lname) and not lexists(lname))
ok_clean_git(subds.path)
ok_clean_git(ds.path)
@with_tempfile()
def test_remove_dataset_hierarchy(path):
ds = Dataset(path).create()
ds.create('deep')
ok_clean_git(ds.path)
# fail on missing --recursive because subdataset is present
assert_raises(IncompleteResultsError, ds.remove)
ok_clean_git(ds.path)
ds.remove(recursive=True)
# completely gone
ok_(not ds.is_installed())
ok_(not exists(ds.path))
# now do it again, but without a reference dataset
ds = Dataset(path).create()
ds.create('deep')
ok_clean_git(ds.path)
remove(ds.path, recursive=True)
# completely gone
ok_(not ds.is_installed())
ok_(not exists(ds.path))
@with_tempfile()
@known_failure_direct_mode #FIXME
def test_careless_subdataset_uninstall(path):
# nested datasets
ds = Dataset(path).create()
subds1 = ds.create('deep1')
ds.create('deep2')
eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['deep1', 'deep2'])
ok_clean_git(ds.path)
# now we kill the sub without the parent knowing
subds1.uninstall()
ok_(not subds1.is_installed())
# mountpoint exists
ok_(exists(subds1.path))
ok_clean_git(ds.path)
# parent still knows the sub
eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['deep1', 'deep2'])
@with_tempfile()
@known_failure_direct_mode #FIXME
def test_kill(path):
# nested datasets with load
ds = Dataset(path).create()
testfile = opj(ds.path, "file.dat")
with open(testfile, 'w') as f:
f.write("load")
ds.add("file.dat")
subds = ds.create('deep1')
eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['deep1'])
ok_clean_git(ds.path)
# and we fail to remove since content can't be dropped
res = ds.remove(on_failure='ignore')
assert_result_count(
res, 1,
status='error', path=testfile)
# Following two assertions on message are relying on the actual error.
# We have a second result with status 'impossible' for the ds, that we need
# to filter out for those assertions:
err_result = [r for r in res if r['status'] == 'error'][0]
assert_result_values_cond(
[err_result], 'message',
lambda x: "configured minimum number of copies not found" in x or
"Could only verify the existence of 0 out of 1 necessary copies" in x
)
eq_(ds.remove(recursive=True, check=False, result_xfm='datasets'),
[subds, ds])
ok_(not exists(path))
@with_tempfile()
def test_remove_recreation(path):
# test recreation is possible and doesn't conflict with in-memory
    # remnants of the old instances
# see issue #1311
ds = create(path)
ds.remove()
ds = create(path)
ok_clean_git(ds.path)
ok_(ds.is_installed())
@with_tempfile()
def test_no_interaction_with_untracked_content(path):
# extracted from what was a metadata test originally
ds = Dataset(opj(path, 'origin')).create(force=True)
create_tree(ds.path, {'sub': {'subsub': {'dat': 'lots of data'}}})
subds = ds.create('sub', force=True)
subds.remove(opj('.datalad', 'config'), if_dirty='ignore')
ok_(not exists(opj(subds.path, '.datalad', 'config')))
# this will only work, if `remove` didn't do anything stupid and
# caused all content to be saved
subds.create('subsub', force=True)
@with_tempfile()
def test_remove_nowhining(path):
# when removing a dataset under a dataset (but not a subdataset)
# should not provide a meaningless message that something was not right
ds = create(path)
# just install/clone inside of it
subds_path = _path_(path, 'subds')
install(subds_path, source=path)
remove(subds_path) # should remove just fine
@usecase
@known_failure_v6 # https://github.com/datalad/datalad/pull/2391#issuecomment-379414293
@skip_if_no_network
@with_tempfile(mkdir=True)
@use_cassette('test_remove_recursive_2')
def test_remove_recursive_2(tdir):
# fails in some cases https://github.com/datalad/datalad/issues/1573
with chpwd(tdir):
install('///labs')
install('labs/tarr/face_place')
remove('labs', recursive=True)
@with_tempfile(mkdir=True)
@known_failure_direct_mode #FIXME
def test_failon_nodrop(path):
# test to make sure that we do not wipe out data when checks are enabled
# despite the general error behavior mode
ds = Dataset(path).create()
# we play with a subdataset to bypass the tests that prevent the removal
# of top-level datasets
sub = ds.create('sub')
create_tree(sub.path, {'test': 'content'})
ds.add(opj('sub', 'test'))
ok_clean_git(ds.path)
eq_(['test'], sub.repo.get_annexed_files(with_content_only=True))
# we put one file into the dataset's annex, no redundant copies
# neither uninstall nor remove should work
res = ds.uninstall('sub', check=True, on_failure='ignore')
assert_status(['error', 'impossible'], res)
eq_(['test'], sub.repo.get_annexed_files(with_content_only=True))
# same with remove
res = ds.remove('sub', check=True, on_failure='ignore')
assert_status(['error', 'impossible'], res)
eq_(['test'], sub.repo.get_annexed_files(with_content_only=True))
@with_tempfile(mkdir=True)
def test_uninstall_without_super(path):
# a parent dataset with a proper subdataset, and another dataset that
# is just placed underneath the parent, but not an actual subdataset
parent = Dataset(path).create()
sub = parent.create('sub')
ok_clean_git(parent.path)
nosub = create(opj(parent.path, 'nosub'))
ok_clean_git(nosub.path)
subreport = parent.subdatasets()
assert_result_count(subreport, 1, path=sub.path)
assert_result_count(subreport, 0, path=nosub.path)
# it should be possible to uninstall the proper subdataset, even without
# explicitly calling the uninstall methods of the parent -- things should
# be figured out by datalad
uninstall(sub.path)
assert not sub.is_installed()
# no present subdatasets anymore
subreport = parent.subdatasets()
assert_result_count(subreport, 1)
assert_result_count(subreport, 1, path=sub.path, state='absent')
assert_result_count(subreport, 0, path=nosub.path)
# but we should fail on an attempt to uninstall the non-subdataset
res = uninstall(nosub.path, on_failure='ignore')
assert_result_count(
res, 1, path=nosub.path, status='error',
message="will not uninstall top-level dataset (consider `remove` command)")
@with_tempfile(mkdir=True)
def test_drop_nocrash_absent_subds(path):
parent = Dataset(path).create()
sub = parent.create('sub')
parent.uninstall('sub')
ok_clean_git(parent.path)
with chpwd(path):
assert_status('notneeded', drop('.', recursive=True))
@with_tree({'one': 'one', 'two': 'two', 'three': 'three'})
def test_remove_more_than_one(path):
ds = Dataset(path).create(force=True)
ds.add('.')
ok_clean_git(path)
# ensure #1912 stays resolved
ds.remove(['one', 'two'], check=False)
ok_clean_git(path)
| 36.956124
| 97
| 0.680188
|
521ec627cd052031ba239215dd1b09a13a9a642b
| 12,440
|
py
|
Python
|
mycroft/util/lang/format_sv.py
|
JulianaCP/mycroft-core
|
2bfcb948b73a86a1625a8490c12bc031c2ff0379
|
[
"Apache-2.0"
] | 1
|
2019-12-07T18:10:56.000Z
|
2019-12-07T18:10:56.000Z
|
mycroft/util/lang/format_sv.py
|
JulianaCP/mycroft-core
|
2bfcb948b73a86a1625a8490c12bc031c2ff0379
|
[
"Apache-2.0"
] | null | null | null |
mycroft/util/lang/format_sv.py
|
JulianaCP/mycroft-core
|
2bfcb948b73a86a1625a8490c12bc031c2ff0379
|
[
"Apache-2.0"
] | 1
|
2019-12-09T13:42:51.000Z
|
2019-12-09T13:42:51.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.util.lang.format_common import convert_to_mixed_fraction
from math import floor
months = ['januari', 'februari', 'mars', 'april', 'maj', 'juni',
'juli', 'augusti', 'september', 'oktober', 'november',
'december']
NUM_STRING_SV = {
0: 'noll',
1: 'en',
2: 'två',
3: 'tre',
4: 'fyra',
5: 'fem',
6: 'sex',
7: 'sju',
8: 'åtta',
9: 'nio',
10: 'tio',
11: 'elva',
12: 'tolv',
13: 'tretton',
14: 'fjorton',
15: 'femton',
16: 'sexton',
17: 'sjutton',
18: 'arton',
19: 'nitton',
20: 'tjugo',
30: 'trettio',
40: 'fyrtio',
50: 'femtio',
60: 'sextio',
70: 'sjuttio',
80: 'åttio',
90: 'nittio',
100: 'hundra'
}
NUM_POWERS_OF_TEN = [
'hundra',
'tusen',
'miljon',
'miljard',
'biljon',
'biljard',
'triljon',
'triljard'
]
FRACTION_STRING_SV = {
2: 'halv',
3: 'tredjedel',
4: 'fjärdedel',
5: 'femtedel',
6: 'sjättedel',
7: 'sjundedel',
8: 'åttondel',
9: 'niondel',
10: 'tiondel',
11: 'elftedel',
12: 'tolftedel',
13: 'trettondel',
14: 'fjortondel',
15: 'femtondel',
16: 'sextondel',
17: 'sjuttondel',
18: 'artondel',
19: 'nittondel',
20: 'tjugondel'
}
EXTRA_SPACE = " "
def nice_number_sv(number, speech, denominators=range(1, 21)):
""" Swedish helper for nice_number
    This function formats a float into a human understandable form. For example,
4.5 becomes "4 och en halv" for speech and "4 1/2" for text
Args:
number (int or float): the float to format
speech (bool): format for speech (True) or display (False)
denominators (iter of ints): denominators to use, default [1 .. 20]
Returns:
(str): The formatted string.
"""
result = convert_to_mixed_fraction(number, denominators)
if not result:
# Give up, just represent as a 3 decimal number
return str(round(number, 3))
whole, num, den = result
if not speech:
if num == 0:
# TODO: Number grouping? E.g. "1,000,000"
return str(whole)
else:
return '{} {}/{}'.format(whole, num, den)
if num == 0:
return str(whole)
den_str = FRACTION_STRING_SV[den]
if whole == 0:
if num == 1:
return_string = 'en {}'.format(den_str)
else:
return_string = '{} {}'.format(num, den_str)
elif num == 1:
return_string = '{} och en {}'.format(whole, den_str)
else:
return_string = '{} och {} {}'.format(whole, num, den_str)
if num > 1:
return_string += 'ar'
return return_string
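# Example, taken from the docstring above:
#     nice_number_sv(4.5, speech=True)   # -> '4 och en halv'
#     nice_number_sv(4.5, speech=False)  # -> '4 1/2'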
def pronounce_number_sv(num, places=2):
"""
    Convert a number to its spoken Swedish equivalent
    For example, '5.2' would return 'fem komma två'
Args:
num(float or int): the number to pronounce (set limit below)
places(int): maximum decimal places to speak
Returns:
(str): The pronounced number
"""
def pronounce_triplet_sv(num):
result = ""
num = floor(num)
if num > 99:
hundreds = floor(num / 100)
if hundreds > 0:
if hundreds == 1:
result += 'ett' + 'hundra'
else:
result += NUM_STRING_SV[hundreds] + 'hundra'
num -= hundreds * 100
if num == 0:
result += '' # do nothing
elif num == 1:
result += 'ett'
elif num <= 20:
result += NUM_STRING_SV[num]
elif num > 20:
tens = num % 10
ones = num - tens
if ones > 0:
result += NUM_STRING_SV[ones]
if tens > 0:
result += NUM_STRING_SV[tens]
return result
def pronounce_fractional_sv(num, places):
# fixed number of places even with trailing zeros
result = ""
place = 10
while places > 0:
# doesn't work with 1.0001 and places = 2: int(
# num*place) % 10 > 0 and places > 0:
result += " " + NUM_STRING_SV[int(num * place) % 10]
place *= 10
places -= 1
return result
def pronounce_whole_number_sv(num, scale_level=0):
if num == 0:
return ''
num = floor(num)
result = ''
last_triplet = num % 1000
if last_triplet == 1:
if scale_level == 0:
if result != '':
result += '' + 'ett'
else:
result += 'en'
elif scale_level == 1:
result += 'ettusen' + EXTRA_SPACE
else:
result += 'en ' + NUM_POWERS_OF_TEN[scale_level] + EXTRA_SPACE
elif last_triplet > 1:
result += pronounce_triplet_sv(last_triplet)
if scale_level == 1:
result += 'tusen' + EXTRA_SPACE
if scale_level >= 2:
result += NUM_POWERS_OF_TEN[scale_level]
if scale_level >= 2:
result += 'er' + EXTRA_SPACE # MiljonER
num = floor(num / 1000)
scale_level += 1
return pronounce_whole_number_sv(num, scale_level) + result
result = ""
if abs(num) >= 1000000000000000000000000: # cannot do more than this
return str(num)
elif num == 0:
return str(NUM_STRING_SV[0])
elif num < 0:
return "minus " + pronounce_number_sv(abs(num), places)
else:
if num == int(num):
return pronounce_whole_number_sv(num)
else:
whole_number_part = floor(num)
fractional_part = num - whole_number_part
result += pronounce_whole_number_sv(whole_number_part)
if places > 0:
result += " komma"
result += pronounce_fractional_sv(fractional_part, places)
return result
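# Illustration (not in the original source):
#     pronounce_number_sv(5)        # -> 'fem'
#     pronounce_number_sv(-5)       # -> 'minus fem'
#     pronounce_number_sv(5.2, 1)   # -> 'fem komma två'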
def pronounce_ordinal_sv(num):
# ordinals for 1, 3, 7 and 8 are irregular
# this produces the base form, it will have to be adapted for genus,
# casus, numerus
ordinals = ["noll", "första", "andra", "tredje", "fjärde", "femte",
"sjätte", "sjunde", "åttonde", "nionde", "tionde"]
tens = int(floor(num / 10.0)) * 10
ones = num % 10
if num < 0 or num != int(num):
return num
if num == 0:
return ordinals[num]
result = ""
if num > 10:
result += pronounce_number_sv(tens).rstrip()
if ones > 0:
result += ordinals[ones]
else:
result += 'de'
return result
def nice_time_sv(dt, speech=True, use_24hour=False, use_ampm=False):
"""
Format a time to a comfortable human format
    For example, generate 'halv sex' for speech or '5:30' for
text display.
Args:
dt (datetime): date to format (assumes already in local timezone)
        speech (bool): format for speech (default/True) or display (False)
use_24hour (bool): output in 24-hour/military or 12-hour format
use_ampm (bool): include the am/pm for 12-hour format
Returns:
(str): The formatted time string
"""
if use_24hour:
# e.g. "03:01" or "14:22"
string = dt.strftime("%H:%M")
else:
if use_ampm:
# e.g. "3:01 AM" or "2:22 PM"
string = dt.strftime("%I:%M %p")
else:
# e.g. "3:01" or "2:22"
string = dt.strftime("%I:%M")
if not speech:
return string
# Generate a speakable version of the time
speak = ""
if use_24hour:
if dt.hour == 1:
speak += "ett" # 01:00 is "ett" not "en"
else:
speak += pronounce_number_sv(dt.hour)
if not dt.minute == 0:
if dt.minute < 10:
speak += ' noll'
if dt.minute == 1:
speak += ' ett'
else:
speak += " " + pronounce_number_sv(dt.minute)
return speak # ampm is ignored when use_24hour is true
else:
hour = dt.hour
if not dt.minute == 0:
if dt.minute < 30:
if dt.minute != 15:
speak += pronounce_number_sv(dt.minute)
else:
speak += 'kvart'
if dt.minute == 1:
speak += ' minut över '
elif dt.minute != 10 and dt.minute != 5 and dt.minute != 15:
speak += ' minuter över '
else:
speak += ' över '
elif dt.minute > 30:
if dt.minute != 45:
speak += pronounce_number_sv((60 - dt.minute))
else:
speak += 'kvart'
if dt.minute == 1:
speak += ' minut i '
elif dt.minute != 50 and dt.minute != 55 and dt.minute != 45:
speak += ' minuter i '
else:
speak += ' i '
hour = (hour + 1) % 12
elif dt.minute == 30:
speak += 'halv '
hour = (hour + 1) % 12
if hour == 0 and dt.minute == 0:
return "midnatt"
if hour == 12 and dt.minute == 0:
return "middag"
# TODO: "half past 3", "a quarter of 4" and other idiomatic times
if hour == 0:
speak += pronounce_number_sv(12)
elif hour <= 13:
if hour == 1 or hour == 13: # 01:00 and 13:00 is "ett"
speak += 'ett'
else:
speak += pronounce_number_sv(hour)
else:
speak += pronounce_number_sv(hour - 12)
if use_ampm:
if dt.hour > 11:
if dt.hour < 18:
                    # 12:01 - 17:59 in the afternoon
speak += " på eftermiddagen"
elif dt.hour < 22:
                    # 18:00 - 21:59 in the evening
speak += " på kvällen"
else:
                    # 22:00 - 23:59 at night
speak += " på natten"
elif dt.hour < 3:
                # 00:01 - 02:59 at night
speak += " på natten"
else:
                # 03:00 - 11:59 in the morning
speak += " på morgonen"
return speak
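# Illustration (not in the original source), assuming dt is a datetime object:
#     nice_time_sv(datetime(2018, 2, 8, 13, 22))                  # -> 'tjugotvå minuter över ett'
#     nice_time_sv(datetime(2018, 2, 8, 13, 22), use_24hour=True) # -> 'tretton tjugotvå'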
def nice_response_sv(text):
    # check for months and call nice_ordinal_sv for declension of ordinals
    # replace "^" with "upphöjt till" (to the power of)
words = text.split()
for idx, word in enumerate(words):
if word.lower() in months:
text = nice_ordinal_sv(text)
if word == '^':
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
if wordNext.isnumeric():
words[idx] = "upphöjt till"
text = " ".join(words)
return text
def nice_ordinal_sv(text):
# check for months for declension of ordinals before months
# depending on articles/prepositions
normalized_text = text
words = text.split()
for idx, word in enumerate(words):
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordPrev = words[idx - 1] if idx > 0 else ""
if word[-1:] == ".":
if word[:-1].isdecimal():
if wordNext.lower() in months:
word = pronounce_ordinal_sv(int(word[:-1]))
if wordPrev.lower() in ["om", "den", "från", "till",
"(från", "(om", "till"]:
word += "n"
elif wordPrev.lower() not in ["den"]:
word += "r"
words[idx] = word
normalized_text = " ".join(words)
return normalized_text
| 29.201878
| 78
| 0.508923
|
dcbc981734d76d413997157fb3aef9992b537a71
| 13,768
|
py
|
Python
|
donghuangzhong/src/core/util.py
|
onsis/hack-tools
|
1a1dfebfab8c83379f17563900d3aeb9f240c658
|
[
"Apache-2.0"
] | null | null | null |
donghuangzhong/src/core/util.py
|
onsis/hack-tools
|
1a1dfebfab8c83379f17563900d3aeb9f240c658
|
[
"Apache-2.0"
] | null | null | null |
donghuangzhong/src/core/util.py
|
onsis/hack-tools
|
1a1dfebfab8c83379f17563900d3aeb9f240c658
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*-coding:utf-8 -*-
from scapy.error import Scapy_Exception
from signal import SIGINT
from datetime import date, datetime
from commands import getoutput
from subprocess import Popen
from cmd import Cmd
from pwd import getpwnam
from colors import color
from inspect import getmodule
import scapy.arch
import config
import os
import socket
import fcntl
import struct
import re
"""Utility class housing various functions in use
throughout the donghuangzhong framework.
"""
buffered = None
def version():
"""donghuangzhong version"""
return "0.0.1"
def header():
"""donghuangzhong header"""
ver = color.B_GREEN + ' [' + color.B_YELLOW + 'Version: ' + version() + \
color.B_GREEN + ']' + color.END
print color.B_GREEN + '\t 00'
print '\t 00 00 00'
print '\t 0 东 0'
print '\t 0 0'
print '\t 0 皇 0'
print '\t 0 0'
print '\t 0 钟 0'
print '\t 00 00' + ver
print color.END
if config.get('debug'):
print '\t ' + color.B_BLUE + ' [DEBUGGING]' + color.END
def Error(msg):
"""Prints the given message and, if debugging is on,
logs it.
"""
print color.B_RED + '[-] %s' % (msg) + color.END
if config.get('debug'):
debug(msg)
def Msg(msg):
"""Prints a warning message"""
print color.B_YELLOW + '[' + color.B_GREEN + '!' + color.B_YELLOW + '] %s' % (msg) + color.END
def debug(msg):
"""If debugging is enabled, write the given string
to the debug file
"""
dbg = config.get('log')
if config.get('debug') and not os.path.islink(dbg):
with open(dbg, 'a+') as f:
f.write(format('[%s] %s\n' % (timestamp(), msg))) # TODO add color
def get_input(msg):
""" fetch user input and unify input prompts
"""
tmp = None
try:
tmp = raw_input('[!] %s' % msg)
except:
tmp = None
return tmp
def get_calling_mod(stack):
""" Retrieve the calling function based on the call stack
"""
form = stack[1]
return getmodule(form[0]).__name__
def timestamp():
""" Generate a formatted timestamp
"""
return '%s %s' % (date.today().isoformat(),
datetime.now().strftime('%I:%M%p'))
def getipbyhost(hostname):
""" return the IP address for a hostname
"""
return socket.gethostbyname(hostname)
def next_ip(ip):
"""Return the next IP address following the given IP address.
It needs to be converted to an integer, then add 1,
then converted back to an IP address
"""
ip2int = lambda ipstr: struct.unpack('!I', socket.inet_aton(ipstr))[0]
int2ip = lambda n: socket.inet_ntoa(struct.pack('!I', n))
return int2ip(ip2int(ip) + 1)
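# Illustration (not in the original source):
#     next_ip('10.0.0.255')  # -> '10.0.1.0' (the carry propagates across octets)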
def is_in_subnet(ip, mask):
"""Check if a given IP address is lies within the given netmask
TRUE if 'ip' falls within 'mask'
FALSE otherwise
"""
ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
netstr, bits = mask.split('/')
netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
return (ipaddr & mask) == (netaddr & mask)
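# Illustration (not in the original source):
#     is_in_subnet('192.168.1.5', '192.168.1.0/24')  # -> True
#     is_in_subnet('192.168.2.5', '192.168.1.0/24')  # -> False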
def check_program(prog):
"""Check if program is installed and pathed properly"""
tmp = init_app('which {0}'.format(prog))
if len(tmp) > 0 and '/' in tmp:
return True
else:
return False
def init_app(prog, output=True):
"""inititalize an application
PROG is the full command with args
OUTPUT true if output should be returned
false if output should be dumped to null. This will
return a process handle and is meant for initializing
background processes. Use wisely.
"""
# dump output to null
if not output:
try:
null = open(os.devnull, 'w')
proc = Popen(prog, stdout=null, stderr=null)
except Exception, j:
Error("Error initializing app: %s" % j)
return False
return proc
# just grab output
else:
return getoutput(prog)
def kill_app(proc):
"""Kill a process"""
try:
os.kill(proc.pid, SIGINT)
except Exception, j:
Error("Error killing app: %s" % (j))
return False
return True
def get_monitor_adapter():
"""Try and automatically detect which adapter is in monitor mode.
NONE if there are none.
"""
tmp = init_app('iwconfig', True)
for line in tmp.split('\n'):
if line.startswith(' '):
continue
elif len(line.split(' ')[0]) > 1:
if 'Mode:Monitor' in line:
return line.split(' ')[0]
return None
def enable_monitor(channel=None):
"""Enable monitor mode on the wireless adapter
CHANNEL is the channel to monitor on.
"""
tmp = init_app('iwconfig', True)
iface = None
for line in tmp.split('\n'):
if line.startswith('wlan'):
try:
iface = line.split(' ')[0]
if channel is None:
tmp = getoutput('airmon-ng start {0}'.format(iface))
else:
tmp = getoutput('airmon-ng start {0} {1}'
.format(iface, channel))
debug("started \'%s\' in monitor mode" % iface)
except Exception, j:
Error("Error enabling monitor mode: %s" % j)
break
return get_monitor_adapter()
def disable_monitor():
"""Kill the monitoring adapter"""
try:
adapt = get_monitor_adapter()
if not adapt is None:
tmp = getoutput('airmon-ng stop %s' % adapt)
debug('killed monitor adapter %s' % adapt)
except Exception, j:
Error('error killing monitor adapter:%s' % j)
def verify_iface(iface):
"""Verify that the given interface exists
"""
try:
tmp = init_app('ifconfig', True)
if not iface in tmp:
return False
return True
except Exception:
return False
def does_file_exist(fle):
"""Check if a local file exists.
"""
try:
with open(fle) as f:
pass
except IOError:
return False
return True
def get_local_ip(adapter):
""" Return the IP address of an adapter.
@param adapter is the adapter to fetch from.
I do not know how portable this is yet.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
addr = socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915,
struct.pack('256s', adapter[:15])
)[20:24])
except:
addr = None
return addr
def test_filter(net_filter):
""" Test a network filter to verify if its valid
"""
valid = False
try:
scapy.arch.attach_filter(None, net_filter)
except Scapy_Exception:
pass
except:
valid = True
return valid
def get_layer_bytes(layer):
"""I havent found a neat way to pull RAW bytes out of Scapy packets,
so I just wrote a small utility function for it.
"""
arr = []
layer = layer.encode('hex')
for (f, s) in zip(layer[0::2], layer[1::2]):
arr.append(f + s)
return arr
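# Illustration (Python 2 only, not in the original source):
#     get_layer_bytes('AB')  # -> ['41', '42']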
def check_opts(choice):
""" Parse up the user input and run whatever commands
are needed.
"""
choice_opts = choice.split(' ')
if len(choice_opts) == 1:
if type(choice) is int:
return choice
elif 'help' in choice:
help()
choice = -1
elif 'gops' in choice:
config.dump()
choice = -1
elif 'quit' in choice or 'exit' in choice:
# hard quit
os._exit(1)
elif 'bg' in choice:
background()
else:
if 'set' in choice_opts[0]:
opts = choice.split(' ')
if opts[1] is None or opts[2] is None:
return
print '[!] Setting ' + color.YELLOW + '%s' % opts[1] + color.END + \
'-> ' + color.GREEN + '%s..' % opts[2] + color.END
config.set(opts[1], opts[2])
choice = -1
return choice
def check_dependency(module):
""" Attempts to load the module; returns a boolean
    indicating success or failure.
"""
try:
mod = __import__(module)
except Exception, e:
Error("Module %s failed to load! (%s)" % (module, e))
return False
return True
def help():
""" Dump a help menu with donghuangzhong options
"""
print color.B_YELLOW + '\n donghaungzhong options:' + color.B_WHITE
print color.B_GREEN + '\thelp\t\t\t' + color.B_WHITE + '- This menu'
print color.B_GREEN + '\tgops\t\t\t' + color.B_WHITE + '- Display global options'
print color.B_GREEN + '\texit\t\t\t' + color.B_WHITE + '- Exit immediately'
print color.B_GREEN + '\tbg\t\t\t' + color.B_WHITE + '- Put zarp to background'
print color.B_GREEN + '\tset [' + color.B_YELLOW + 'key' + color.B_GREEN + '] [' + \
color.B_YELLOW + 'value' + color.B_GREEN + ']' + color.B_WHITE + \
' \t- Set key to value' + color.END
print color.B_YELLOW + '\n donghuangzhong module options:' + color.B_WHITE
print color.B_GREEN + '\t[' + color.B_YELLOW + 'int' + color.B_GREEN + '] [' + \
color.B_YELLOW + 'value' + color.B_GREEN + ']\t\t' + color.B_WHITE + \
'- Set option [int] to value [value]'
print color.B_GREEN + '\t[' + color.B_YELLOW + 'int' + color.B_GREEN + '] o\t\t\t' + \
color.B_WHITE + '- View options for setting'
print color.B_GREEN + '\trun (r)\t\t\t' + color.B_WHITE + '- Run the selected module'
print color.B_GREEN + '\tinfo \t\t\t' + color.B_WHITE + '- Display module information'
print color.B_GREEN + '\tops \t\t\t' + color.B_WHITE + '- Display module options'
print color.END
def get_run_usr():
""" Fetch the user that launched zarp
"""
if 'SUDO_USER' in os.environ:
usr = os.environ['SUDO_USER']
else:
usr = init_app('who -m | awk \'{print $1;}\'')
# verify the user exists
try:
getpwnam(usr)
except:
usr = None
return usr
def background():
""" Drops the user back into their shell environment.
'exit' brings them back.
"""
usr = get_run_usr()
if usr is None:
return
Msg('\'exit\' when you\'re done..')
shell = os.environ['SHELL'] if 'SHELL' in os.environ else '/bin/bash'
if check_program(shell):
os.system('su -c %s %s' % (shell, usr))
else:
os.system('su -c /bin/sh %s' % usr)
def print_menu(arr):
global buffered
""" Main menu printer
@param arr is the menu array to print. Fetches input,
parses and built-in command keywords, and returns the selected idx.
"""
if not buffered is None:
# buffered input, return
if len(buffered) > 0:
return buffered.pop(0)
else:
buffered = None
tmp = Cmd()
arr = ['\t%s[%s%d%s] %s%s%s' % (color.B_GREEN, color.B_YELLOW, x + 1, color.B_GREEN,
color.B_WHITE, arr[x], color.END) for x in xrange(len(arr))]
tmp.columnize(arr, 100)
print '\n' + color.B_YELLOW + '0' + color.B_GREEN + ')' + color.B_WHITE + ' Back' + color.END
try:
choice = raw_input(color.B_WHITE + '> ' + color.END)
choice = check_opts(choice)
# buffered input
if choice > 1:
choice = choice.split(' ')
buffered = []
for entry in choice[1:]:
buffered.append(int(entry))
choice = int(choice[0])
except KeyboardInterrupt:
choice = -1
except Exception, e:
debug(e)
os.system('clear')
choice = -1
return choice
def eval_type(value, type):
""" Generic evaluation of types; returns true if the value is of type,
or false if it is not.
Returns a tuple of (bool, obj), where bool determines success and obj
is the value returned as type.
"""
rval = (False, None)
if type == "int":
try:
rval = (True, int(value))
except:
rval = (False, None)
elif type == "bool":
if value in ['True', 'true', '1']:
rval = (True, True)
elif value in ['False', 'false', '0']:
rval = (True, False)
elif type == "ip":
ip = value.split('.')
if len(ip) != 4:
rval = (False, None)
else:
try:
socket.inet_aton(value)
rval = (True, value)
except:
rval = (False, None)
elif type == "str":
# anything can be a string
rval = (True, str(value))
elif type == "ipmask":
ip = value.split('.')
if len(ip) != 4:
rval = (False, None)
else:
try:
int(ip[0])
int(ip[1])
int(ip[2])
rval = (True, value) if '/' in ip[3] else (False, None)
except:
rval = (False, None)
elif type == "regex":
try:
tmp = re.compile(value)
rval = (True, tmp)
except re.error:
rval = (False, None)
elif type == 'list':
# comma delimited
try:
rval = (True, value.split(','))
except:
rval = (False, None)
elif type == 'file':
if does_file_exist(value):
rval = (True, value)
else:
Error('Unrecognized type: %s' % type)
return rval
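# Illustration (not in the original source):
#     eval_type('42', 'int')       # -> (True, 42)
#     eval_type('abc', 'int')      # -> (False, None)
#     eval_type('10.0.0.1', 'ip')  # -> (True, '10.0.0.1')
#     eval_type('true', 'bool')    # -> (True, True)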
| 28.863732
| 98
| 0.547647
|
ba688e0b7d008284f3b8588da83ec1395a3bdfd2
| 5,693
|
py
|
Python
|
orttraining/orttraining/python/training/ortmodule/_inference_manager.py
|
photoszzt/onnxruntime
|
523db6ef443b0a41de1401004ae4b394dad3314c
|
[
"MIT"
] | 2
|
2021-07-02T23:57:21.000Z
|
2022-03-19T07:45:35.000Z
|
orttraining/orttraining/python/training/ortmodule/_inference_manager.py
|
photoszzt/onnxruntime
|
523db6ef443b0a41de1401004ae4b394dad3314c
|
[
"MIT"
] | 1
|
2021-06-18T11:01:02.000Z
|
2021-06-18T11:01:02.000Z
|
orttraining/orttraining/python/training/ortmodule/_inference_manager.py
|
photoszzt/onnxruntime
|
523db6ef443b0a41de1401004ae4b394dad3314c
|
[
"MIT"
] | 1
|
2021-06-18T10:35:22.000Z
|
2021-06-18T10:35:22.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from . import _utils, _io
from ._graph_execution_manager import GraphExecutionManager, RunStateInfo
from ._execution_agent import InferenceAgent
from onnxruntime.capi import _pybind_state as C
import onnx
import torch
class InferenceManager(GraphExecutionManager):
"""Concrete instance of GraphExecutionManager that is able to manage the inference model
    InferenceManager is responsible for building and running the forward graph of the inference model
"""
def __init__(self, model):
super().__init__(model)
self._export_mode = torch.onnx.TrainingMode.EVAL
@staticmethod
def execution_session_run_forward(execution_session, onnx_model, device, *inputs):
"""Runs the forward graph on execution_session with given model inputs and device"""
# Assert that the input and model device match
_utils._check_same_device(device, "Input argument to forward", *inputs)
# TODO: Try to reuse the output buffers as some of the output tensors are same sizes,
# especially the backward graph outputs.
# REVIEW(codemzs): Consolidate Training Agent with InferenceAgent on C++ side to not
# have the need for passing IOBinding.
io_binding = execution_session.io_binding()
run_options = C.RunOptions()
# Use IO binding
_utils._create_iobinding(io_binding, inputs, onnx_model, device)
# Run and return module outputs.
ort_output = execution_session.run_forward(io_binding, run_options)
forward_outputs, run_id = ort_output.ortvalues, ort_output.run_id
user_outputs = tuple(_utils._ortvalue_to_torch_tensor(forward_output._ortvalue) for forward_output in forward_outputs)
state = None
# Assert that the outputs and model device match
_utils._check_same_device(device, "Output argument from forward", *user_outputs)
output_info = [(output.shape, output.device, output.dtype) for output in user_outputs]
run_info = RunStateInfo(state, output_info)
# Return user outputs and forward run information
return user_outputs, run_info
def forward(self, *inputs, **kwargs):
'''Forward pass of the inference model
ONNX model is exported the first time this method is executed.
Next, we build an optimized inference graph with module_graph_builder.
Finally, we instantiate the ONNX Runtime InferenceSession through the InferenceAgent.
'''
# Exporting module to ONNX for the first time
build_graph = self._export_model(*inputs, **kwargs)
if build_graph:
# If model was exported, then initialize the graph builder
self._initialize_graph_builder(training=False)
# Save the onnx model if the model was exported
if self._save_onnx:
onnx.save(self._onnx_model, self._save_onnx_prefix + '_exported_inference_model.onnx')
# Build the inference graph
if build_graph:
self._build_graph()
module_device = _utils.get_device_from_module(self._original_module)
# The inference session should be created every time
# the graph was built or if the device changed between calls to forward
create_execution_session = build_graph or self._device != module_device
if self._device != module_device:
self._device = module_device
if create_execution_session:
# Create execution session creates the inference_session
self._create_execution_agent()
user_outputs, _ = InferenceManager.execution_session_run_forward(self._execution_agent,
self._optimized_onnx_model,
self._device,
*_io._combine_input_buffers_initializers(
self._graph_initializers,
self._graph_info.user_input_names,
self._input_info,
self._flattened_module.named_buffers(),
inputs,
kwargs,
self._device))
return _io.unflatten_user_output(self._module_output_schema,
user_outputs)
def _build_graph(self):
"""Build an optimized inference graph using the module_graph_builder"""
super()._build_graph()
if self._save_onnx:
onnx.save(self._optimized_onnx_model, self._save_onnx_prefix + '_inference.onnx')
def _create_execution_agent(self):
"""Creates an InferenceAgent that can run forward graph on an inference model"""
session_options, providers, provider_options = self._get_session_config()
self._execution_agent = InferenceAgent(self._optimized_onnx_model.SerializeToString(),
session_options, providers, provider_options)
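# Hedged usage sketch (not part of this file; the import path below reflects
# the public ORTModule API as understood here and may differ between
# onnxruntime-training versions):
#
#     from onnxruntime.training.ortmodule import ORTModule
#     model = ORTModule(my_torch_module)  # my_torch_module is a torch.nn.Module
#     model.eval()
#     with torch.no_grad():
#         out = model(x)  # in eval mode this dispatches to InferenceManager.forward above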
| 49.077586
| 126
| 0.597576
|
45994fa3dc2038cf4afa2d6378398673a7c1edff
| 80,230
|
py
|
Python
|
src/pyhees/section4_2.py
|
jjj-design/pyhees
|
d63e7cd84abfc2f509bc1cd1256598a10aac1825
|
[
"MIT"
] | null | null | null |
src/pyhees/section4_2.py
|
jjj-design/pyhees
|
d63e7cd84abfc2f509bc1cd1256598a10aac1825
|
[
"MIT"
] | 3
|
2022-01-04T07:29:52.000Z
|
2022-03-19T08:02:51.000Z
|
src/pyhees/section4_2.py
|
jjj-design/pyhees
|
d63e7cd84abfc2f509bc1cd1256598a10aac1825
|
[
"MIT"
] | 2
|
2022-01-19T07:57:10.000Z
|
2022-03-07T00:25:54.000Z
|
# ============================================================================
# Chapter 4  Heating and Cooling Equipment
# Section 2  Ducted Central Air-Conditioning System
# Ver.05 (Energy Consumption Performance Calculation Program (Residential Version) Ver.02.01 and later)
# ============================================================================
import numpy as np
from functools import lru_cache
import datetime
import pyhees.section3_1 as ld
from pyhees.section3_2_8 import \
get_r_env
from pyhees.section11_1 import \
load_outdoor, \
get_T_ex, \
get_Theta_ex, \
get_X_ex, \
calc_h_ex, \
load_climate, \
get_J
from pyhees.section3_1 import \
get_A_NR
from pyhees.section4_7_i import \
get_A_A_R
from pyhees.section11_3 import \
load_schedule, \
get_schedule_ac
# Obtain the variables required for the untreated load and equipment calculations
def calc_Q_UT_A(A_A, A_MR, A_OR, A_env, mu_H, mu_C, q_hs_rtd_H, q_hs_rtd_C, V_hs_dsgn_H, V_hs_dsgn_C, Q,
VAV, general_ventilation, duct_insulation, region, L_H_d_t_i, L_CS_d_t_i, L_CL_d_t_i):
"""
Args:
A_A: param A_MR:
A_OR: param A_env:
mu_H: param mu_C:
q_hs_rtd_H: param q_hs_rtd_C:
V_hs_dsgn_H: param V_hs_dsgn_C:
Q: param VAV:
general_ventilation: param duct_insulation:
region: param L_H_d_t_i:
L_CS_d_t_i: param L_CL_d_t_i:
A_MR:
A_env:
mu_C:
q_hs_rtd_C:
V_hs_dsgn_C:
VAV:
duct_insulation:
L_H_d_t_i:
L_CL_d_t_i:
Returns:
"""
    # Outdoor air conditions
outdoor = load_outdoor()
Theta_ex_d_t = get_Theta_ex(region, outdoor)
X_ex_d_t = get_X_ex(region, outdoor)
climate = load_climate(region)
J_d_t = get_J(climate)
h_ex_d_t = calc_h_ex(X_ex_d_t, Theta_ex_d_t)
A_HCZ_i = np.array([ld.get_A_HCZ_i(i, A_A, A_MR, A_OR) for i in range(1, 6)])
A_HCZ_R_i = [ld.get_A_HCZ_R_i(i) for i in range(1, 6)]
A_NR = get_A_NR(A_A, A_MR, A_OR)
# (67)
L_wtr = get_L_wtr()
# (66d)
n_p_NR_d_t = calc_n_p_NR_d_t(A_NR)
# (66c)
n_p_OR_d_t = calc_n_p_OR_d_t(A_OR)
# (66b)
n_p_MR_d_t = calc_n_p_MR_d_t(A_MR)
# (66a)
n_p_d_t = get_n_p_d_t(n_p_MR_d_t, n_p_OR_d_t, n_p_NR_d_t)
    # Heat generated by occupants
q_p_H = get_q_p_H()
q_p_CS = get_q_p_CS()
q_p_CL = get_q_p_CL()
# (65d)
w_gen_NR_d_t = calc_w_gen_NR_d_t(A_NR)
# (65c)
w_gen_OR_d_t = calc_w_gen_OR_d_t(A_OR)
# (65b)
w_gen_MR_d_t = calc_w_gen_MR_d_t(A_MR)
# (65a)
w_gen_d_t = get_w_gen_d_t(w_gen_MR_d_t, w_gen_OR_d_t, w_gen_NR_d_t)
# (64d)
q_gen_NR_d_t = calc_q_gen_NR_d_t(A_NR)
# (64c)
q_gen_OR_d_t = calc_q_gen_OR_d_t(A_OR)
# (64b)
q_gen_MR_d_t = calc_q_gen_MR_d_t(A_MR)
# (64a)
q_gen_d_t = get_q_gen_d_t(q_gen_MR_d_t, q_gen_OR_d_t, q_gen_NR_d_t)
# (63)
V_vent_l_NR_d_t = get_V_vent_l_NR_d_t()
V_vent_l_OR_d_t = get_V_vent_l_OR_d_t()
V_vent_l_MR_d_t = get_V_vent_l_MR_d_t()
V_vent_l_d_t = get_V_vent_l_d_t(V_vent_l_MR_d_t, V_vent_l_OR_d_t, V_vent_l_NR_d_t)
# (62)
V_vent_g_i = get_V_vent_g_i(A_HCZ_i, A_HCZ_R_i)
# (61)
U_prt = get_U_prt()
# (60)
r_env = get_r_env(A_env, A_A)
A_prt_i = get_A_prt_i(A_HCZ_i, r_env, A_MR, A_NR, A_OR)
# (59)
Theta_SAT_d_t = get_Theta_SAT_d_t(Theta_ex_d_t, J_d_t)
# (58)
l_duct_ex_i = get_l_duct_ex_i(A_A)
# (57)
l_duct_in_i = get_l_duct_in_i(A_A)
# (56)
l_duct_i = get_l_duct__i(l_duct_in_i, l_duct_ex_i)
# (51)
X_star_HBR_d_t = get_X_star_HBR_d_t(X_ex_d_t, region)
# (50)
Theta_star_HBR_d_t = get_Theta_star_HBR_d_t(Theta_ex_d_t, region)
# (55)
Theta_attic_d_t = get_Theta_attic_d_t(Theta_SAT_d_t, Theta_star_HBR_d_t)
# (54)
Theta_sur_d_t_i = get_Theta_sur_d_t_i(Theta_star_HBR_d_t, Theta_attic_d_t, l_duct_in_i, l_duct_ex_i, duct_insulation)
# (40)
Q_hat_hs_d_t = calc_Q_hat_hs_d_t(Q, A_A, V_vent_l_d_t, V_vent_g_i, mu_H, mu_C, J_d_t, q_gen_d_t, n_p_d_t, q_p_H,
q_p_CS, q_p_CL, X_ex_d_t, w_gen_d_t, Theta_ex_d_t, L_wtr, region)
# (39)
V_hs_min = get_V_hs_min(V_vent_g_i)
# (38)
Q_hs_rtd_C = get_Q_hs_rtd_C(q_hs_rtd_C)
# (37)
Q_hs_rtd_H = get_Q_hs_rtd_H(q_hs_rtd_H)
# (36)
V_dash_hs_supply_d_t = get_V_dash_hs_supply_d_t(V_hs_min, V_hs_dsgn_H, V_hs_dsgn_C, Q_hs_rtd_H, Q_hs_rtd_C, Q_hat_hs_d_t, region)
# (45)
r_supply_des_i = get_r_supply_des_i(A_HCZ_i)
# (44)
V_dash_supply_d_t_i = get_V_dash_supply_d_t_i(r_supply_des_i, V_dash_hs_supply_d_t, V_vent_g_i)
# (53)
X_star_NR_d_t = get_X_star_NR_d_t(X_star_HBR_d_t, L_CL_d_t_i, L_wtr, V_vent_l_NR_d_t, V_dash_supply_d_t_i, region)
# (52)
Theta_star_NR_d_t = get_Theta_star_NR_d_t(Theta_star_HBR_d_t, Q, A_NR, V_vent_l_NR_d_t, V_dash_supply_d_t_i, U_prt,
A_prt_i, L_H_d_t_i, L_CS_d_t_i, region)
# (49)
X_NR_d_t = get_X_NR_d_t(X_star_NR_d_t)
# (47)
X_HBR_d_t_i = get_X_HBR_d_t_i(X_star_HBR_d_t)
# (11)
Q_star_trs_prt_d_t_i = get_Q_star_trs_prt_d_t_i(U_prt, A_prt_i, Theta_star_HBR_d_t, Theta_star_NR_d_t)
# (10)
L_star_CL_d_t_i = get_L_star_CL_d_t_i(L_CS_d_t_i, L_CL_d_t_i, region)
# (9)
L_star_CS_d_t_i = get_L_star_CS_d_t_i(L_CS_d_t_i, Q_star_trs_prt_d_t_i, region)
# (8)
L_star_H_d_t_i = get_L_star_H_d_t_i(L_H_d_t_i, Q_star_trs_prt_d_t_i, region)
# (33)
L_star_CL_d_t = get_L_star_CL_d_t(L_star_CL_d_t_i)
# (32)
L_star_CS_d_t = get_L_star_CS_d_t(L_star_CS_d_t_i)
# (31)
L_star_CL_max_d_t = get_L_star_CL_max_d_t(L_star_CS_d_t)
# (30)
L_star_dash_CL_d_t = get_L_star_dash_CL_d_t(L_star_CL_max_d_t, L_star_CL_d_t)
# (29)
L_star_dash_C_d_t = get_L_star_dash_C_d_t(L_star_CS_d_t, L_star_dash_CL_d_t)
# (28)
SHF_dash_d_t = get_SHF_dash_d_t(L_star_CS_d_t, L_star_dash_C_d_t)
# (27)
Q_hs_max_C_d_t = get_Q_hs_max_C_d_t(q_hs_rtd_C)
# (26)
Q_hs_max_CL_d_t = get_Q_hs_max_CL_d_t(Q_hs_max_C_d_t, SHF_dash_d_t, L_star_dash_CL_d_t)
# (25)
Q_hs_max_CS_d_t = get_Q_hs_max_CS_d_t(Q_hs_max_C_d_t, SHF_dash_d_t)
# (24)
C_df_H_d_t = get_C_df_H_d_t(Theta_ex_d_t, h_ex_d_t)
# (23)
Q_hs_max_H_d_t = get_Q_hs_max_H_d_t(q_hs_rtd_H, C_df_H_d_t)
# (20)
X_star_hs_in_d_t = get_X_star_hs_in_d_t(X_star_NR_d_t)
# (19)
Theta_star_hs_in_d_t = get_Theta_star_hs_in_d_t(Theta_star_NR_d_t)
# (18)
X_hs_out_min_C_d_t = get_X_hs_out_min_C_d_t(X_star_hs_in_d_t, Q_hs_max_CL_d_t, V_dash_supply_d_t_i)
# (22)
X_req_d_t_i = get_X_req_d_t_i(X_star_HBR_d_t, L_star_CL_d_t_i, V_dash_supply_d_t_i, region)
# (21)
Theta_req_d_t_i = get_Theta_req_d_t_i(Theta_sur_d_t_i, Theta_star_HBR_d_t, V_dash_supply_d_t_i,
L_star_H_d_t_i, L_star_CS_d_t_i, l_duct_i, region)
# (15)
X_hs_out_d_t = get_X_hs_out_d_t(X_NR_d_t, X_req_d_t_i, V_dash_supply_d_t_i, X_hs_out_min_C_d_t, L_star_CL_d_t_i, region)
# 式(14)(46)(48)の条件に合わせてTheta_NR_d_tを初期化
Theta_NR_d_t = np.zeros(24 * 365)
# (17)
Theta_hs_out_min_C_d_t = get_Theta_hs_out_min_C_d_t(Theta_star_hs_in_d_t, Q_hs_max_CS_d_t, V_dash_supply_d_t_i)
# (16)
Theta_hs_out_max_H_d_t = get_Theta_hs_out_max_H_d_t(Theta_star_hs_in_d_t, Q_hs_max_H_d_t, V_dash_supply_d_t_i)
# L_star_H_d_t_i,L_star_CS_d_t_iの暖冷房区画1~5を合算し0以上だった場合の順序で計算
# (14)
Theta_hs_out_d_t = get_Theta_hs_out_d_t(VAV, Theta_req_d_t_i, V_dash_supply_d_t_i,
L_star_H_d_t_i, L_star_CS_d_t_i, region, Theta_NR_d_t,
Theta_hs_out_max_H_d_t, Theta_hs_out_min_C_d_t)
# (43)
V_supply_d_t_i = get_V_supply_d_t_i(L_star_H_d_t_i, L_star_CS_d_t_i, Theta_sur_d_t_i, l_duct_i, Theta_star_HBR_d_t,
V_vent_g_i, V_dash_supply_d_t_i, VAV, region, Theta_hs_out_d_t)
# (41)
Theta_supply_d_t_i = get_Thata_supply_d_t_i(Theta_sur_d_t_i, Theta_hs_out_d_t, Theta_star_HBR_d_t, l_duct_i,
V_supply_d_t_i, L_star_H_d_t_i, L_star_CS_d_t_i, region)
# (46)
Theta_HBR_d_t_i = get_Theta_HBR_d_t_i(Theta_star_HBR_d_t, V_supply_d_t_i, Theta_supply_d_t_i, U_prt, A_prt_i, Q,
A_HCZ_i, L_star_H_d_t_i, L_star_CS_d_t_i, region)
# (48)
Theta_NR_d_t = get_Theta_NR_d_t(Theta_star_NR_d_t, Theta_star_HBR_d_t, Theta_HBR_d_t_i, A_NR, V_vent_l_NR_d_t,
V_dash_supply_d_t_i, V_supply_d_t_i, U_prt, A_prt_i, Q)
# L_star_H_d_t_i,L_star_CS_d_t_iの暖冷房区画1~5を合算し0以下だった場合の為に再計算
# (14)
Theta_hs_out_d_t = get_Theta_hs_out_d_t(VAV, Theta_req_d_t_i, V_dash_supply_d_t_i,
L_star_H_d_t_i, L_star_CS_d_t_i, region, Theta_NR_d_t,
Theta_hs_out_max_H_d_t, Theta_hs_out_min_C_d_t)
# (42)
X_supply_d_t_i = get_X_supply_d_t_i(X_star_HBR_d_t, X_hs_out_d_t, L_star_CL_d_t_i, region)
# (35)
V_hs_vent_d_t = get_V_hs_vent_d_t(V_vent_g_i, general_ventilation)
# (34)
V_hs_supply_d_t = get_V_hs_supply_d_t(V_supply_d_t_i)
# (13)
X_hs_in_d_t = get_X_hs_in_d_t(X_NR_d_t)
# (12)
Theta_hs_in_d_t = get_Theta_hs_in_d_t(Theta_NR_d_t)
# (7)
L_dash_CL_d_t_i = get_L_dash_CL_d_t_i(V_supply_d_t_i, X_HBR_d_t_i, X_supply_d_t_i, region)
# (6)
L_dash_CS_d_t_i = get_L_dash_CS_d_t_i(V_supply_d_t_i, Theta_supply_d_t_i, Theta_HBR_d_t_i, region)
# (5)
L_dash_H_d_t_i = get_L_dash_H_d_t_i(V_supply_d_t_i, Theta_supply_d_t_i, Theta_HBR_d_t_i, region)
# (4)
Q_UT_CL_d_t_i = get_Q_UT_CL_d_t_i(L_star_CL_d_t_i, L_dash_CL_d_t_i)
# (3)
Q_UT_CS_d_t_i = get_Q_UT_CS_d_t_i(L_star_CS_d_t_i, L_dash_CS_d_t_i)
# (2)
Q_UT_H_d_t_i = get_Q_UT_H_d_t_i(L_star_H_d_t_i, L_dash_H_d_t_i)
# (1)
E_C_UT_d_t = get_E_C_UT_d_t(Q_UT_CL_d_t_i, Q_UT_CS_d_t_i, region)
return E_C_UT_d_t, Q_UT_H_d_t_i, Q_UT_CS_d_t_i, Q_UT_CL_d_t_i, Theta_hs_out_d_t, Theta_hs_in_d_t, \
X_hs_out_d_t, X_hs_in_d_t, V_hs_supply_d_t, V_hs_vent_d_t, C_df_H_d_t
# ============================================================================
# 5 暖房エネルギー消費量
# ============================================================================
# ============================================================================
# 5.1 消費電力量
# ============================================================================
# 4_2_aで実装
# ============================================================================
# 5.2 ガス消費量
# ============================================================================
# 日付dの時刻tにおける1時間当たりのガス消費量 (MJ/h)
def get_E_G_H_d_t():
"""ガス消費量
ガス消費量は0とする
Args:
Returns:
ndarray: ガス消費量
"""
# ガス消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 5.3 灯油消費量
# ============================================================================
# 日付dの時刻tにおける1時間当たりの灯油消費量 (MJ/h)
def get_E_K_H_d_t():
"""灯油消費量
灯油消費量は0とする
Args:
Returns:
ndarray: 灯油消費量
"""
# 灯油消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 5.4 その他の燃料による一次エネルギ―消費量
# ============================================================================
# 日付dの時刻tにおける1時間当たりのその他の燃料による一次エネルギー消費量 (MJ/h)
def get_E_M_H_d_t():
"""その他の燃料による一次エネルギー消費量
Args:
Returns:
ndarray: その他の燃料による一次エネルギー消費量
"""
# その他の燃料による一次エネルギー消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 6 冷房エネルギー消費量
# ============================================================================
# ============================================================================
# 6.1 消費電力量
# ============================================================================
# 4_2_aで実装
# ============================================================================
# 6.2 ガス消費量
# ============================================================================
# 日付dの時刻tにおける1時間当たりのガス消費量 (MJ/h)
def get_E_G_C_d_t():
""" """
# ガス消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 6.3 灯油消費量
# ============================================================================
# 日付dの時刻tにおける1時間当たりの灯油消費量 (MJ/h)
def get_E_K_C_d_t():
""" """
# 灯油消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 6.4 その他の燃料による一次エネルギ―消費量
# ============================================================================
# 日付dの時刻tにおける1時間当たりのその他の燃料による一次エネルギー消費量 (MJ/h)
def get_E_M_C_d_t():
""" """
# その他の燃料による一次エネルギー消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 7 冷房設備の未処理冷房負荷の設計一次エネルギー消費量相当値
# ============================================================================
def get_E_C_UT_d_t(Q_UT_CL_d_t_i, Q_UT_CS_d_t_i, region):
"""(1)
Args:
Q_UT_CL_d_t_i: 日付dの時刻tにおける1時間当たりの暖冷房区画iに設置された冷房機器の未処理冷房潜熱負荷(MJ/h)
Q_UT_CS_d_t_i: 日付dの時刻tにおける1時間当たりの暖冷房区画iに設置された冷房機器の未処理冷房顕熱負荷(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける1時間当たりの冷房設備の未処理冷房負荷の設計一次エネルギー消費量相当値(MJ/h)
"""
# 暖房設備の未処理冷房負荷を未処理暖房負荷の設計一次エネルギー消費量相当値に換算する係数α_(UT,H)(-)を取得
from pyhees.section4_1 import \
get_alpha_UT_H_A
region = 7 if region == 8 else region
alpha_UT_H_A = get_alpha_UT_H_A(region)
# 冷房設備の未処理冷房負荷を未処理冷房負荷の設計一次エネルギー消費量相当値に換算する係数(-)
alpha_UT_C = alpha_UT_H_A
return np.sum(alpha_UT_C * (Q_UT_CL_d_t_i + Q_UT_CS_d_t_i), axis=0)
# ============================================================================
# 8 未処理負荷
# ============================================================================
# メモ: i=1-5のみ i>=6 の場合はどこで計算するのか要確認
def get_Q_UT_H_d_t_i(L_star_H_d_t_i, L_dash_H_d_t_i):
"""(2)
Args:
L_star_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の暖房負荷(MJ/h)
L_dash_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの間仕切りの熱損失を含む実際の暖房負荷(MJ/h)
Returns:
日付dの時刻tにおける1時間当たりの暖冷房区画iに設置された暖房設備機器等の未処理暖房負荷(MJ/h)
"""
return np.clip(L_star_H_d_t_i[:5] - L_dash_H_d_t_i[:5], 0, None)
# メモ: i=1-5のみ i>=6 の場合はどこで計算するのか要確認
def get_Q_UT_CS_d_t_i(L_star_CS_d_t_i, L_dash_CS_d_t_i):
"""(3)
Args:
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷(MJ/h)
L_dash_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの間仕切りの熱損失を含む実際の冷房顕熱負荷(MJ/h)
Returns:
日付dの時刻tにおける1時間当たりの暖冷房区画iに設置された冷房機器の未処理冷房顕熱負荷(MJ/h)
"""
return np.clip(L_star_CS_d_t_i[:5] - L_dash_CS_d_t_i[:5], 0, None)
# メモ: i=1-5のみ i>=6 の場合はどこで計算するのか要確認
def get_Q_UT_CL_d_t_i(L_star_CL_d_t_i, L_dash_CL_d_t_i):
"""(4)
Args:
L_star_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房潜熱負荷(MJ/h)
L_dash_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの間仕切りの熱損失を含む実際の冷房潜熱負荷(MJ/h)
Returns:
日付dの時刻tにおける1時間当たりの暖冷房区画iに設置された冷房機器の未処理冷房潜熱負荷(MJ/h)
"""
return np.clip(L_star_CL_d_t_i[:5] - L_dash_CL_d_t_i[:5], 0, None)
# メモ: i=1-5のみ i>=6 の場合はどこで計算するのか要確認
def get_L_dash_H_d_t_i(V_supply_d_t_i, Theta_supply_d_t_i, Theta_HBR_d_t_i, region):
"""(5-1)(5-2)(5-3)
Args:
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
Theta_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し温度(℃)
Theta_HBR_d_t_i: 日付dの時刻tにおける暖冷房区画iの実際の居室の室温(℃)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの間仕切りの熱損失を含む実際の暖房負荷(MJ/h)
"""
c_p_air = get_c_p_air()
rho_air = get_rho_air()
H, C, M = get_season_array_d_t(region)
L_dash_H_d_t_i = np.zeros((5, 24 * 365))
# 暖房期 (5-1)
L_dash_H_d_t_i[:, H] = c_p_air * rho_air * V_supply_d_t_i[:, H] * (Theta_supply_d_t_i[:, H] - Theta_HBR_d_t_i[:, H]) * 10 ** -6
# 冷房期 (5-2)
L_dash_H_d_t_i[:, C] = 0.0
# 中間期 (5-3)
L_dash_H_d_t_i[:, M] = 0.0
return L_dash_H_d_t_i
def get_L_dash_CS_d_t_i(V_supply_d_t_i, Theta_supply_d_t_i, Theta_HBR_d_t_i, region):
"""(6-1)(6-2)(6-3)
Args:
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
Theta_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し温度(℃)
Theta_HBR_d_t_i: 日付dの時刻tにおける暖冷房区画iの実際の居室の室温(℃)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの間仕切りの熱損失を含む実際の冷房顕熱および潜熱負荷(MJ/h)
"""
c_p_air = get_c_p_air()
rho_air = get_rho_air()
H, C, M = get_season_array_d_t(region)
L_dash_CS_d_t_i = np.zeros((5, 24 * 365))
# 暖房期 (6-1)
L_dash_CS_d_t_i[:, H] = 0.0
# 冷房期 (6-2)
L_dash_CS_d_t_i[:, C] = c_p_air * rho_air * V_supply_d_t_i[:, C] * (Theta_HBR_d_t_i[:, C] - Theta_supply_d_t_i[:, C]) * 10 ** -6
# 中間期 (6-3)
L_dash_CS_d_t_i[:, M] = 0.0
return L_dash_CS_d_t_i
def get_L_dash_CL_d_t_i(V_supply_d_t_i, X_HBR_d_t_i, X_supply_d_t_i, region):
"""(7-1)(7-2)(7-3)
Args:
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
X_HBR_d_t_i: 日付dの時刻tにおける暖冷房区画iの実際の居室の絶対湿度(kg/kg(DA))
X_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し絶対湿度(kg/kg(DA))
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの間仕切りの熱損失を含む実際の冷房顕熱および潜熱負荷(MJ/h)
"""
L_wtr = get_L_wtr()
rho_air = get_rho_air()
H, C, M = get_season_array_d_t(region)
L_dash_CL_d_t_i = np.zeros((5, 24 * 365))
# 暖房期 (7-1)
L_dash_CL_d_t_i[:, H] = 0.0
# 冷房期 (7-2)
L_dash_CL_d_t_i[:, C] = L_wtr * rho_air * V_supply_d_t_i[:, C] * (X_HBR_d_t_i[:, C] - X_supply_d_t_i[:, C]) * 10 ** -3
# 中間期 (7-3)
L_dash_CL_d_t_i[:, M] = 0.0
return L_dash_CL_d_t_i
def get_L_star_H_d_t_i(L_H_d_t_i, Q_star_trs_prt_d_t_i, region):
"""(8-1)(8-2)(8-3)
Args:
L_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの暖房負荷(MJ/h)
Q_star_trs_prt_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の非居室への熱移動(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の暖房負荷
"""
H, C, M = get_season_array_d_t(region)
L_H_d_t_i = L_H_d_t_i[:5]
f = L_H_d_t_i > 0
Hf = np.logical_and(H, f)
L_star_H_d_t_i = np.zeros((5, 24 * 365))
L_star_H_d_t_i[Hf] = np.clip(L_H_d_t_i[Hf] + Q_star_trs_prt_d_t_i[Hf], 0, None)
return L_star_H_d_t_i
def get_L_star_CS_d_t_i(L_CS_d_t_i, Q_star_trs_prt_d_t_i, region):
"""(9-2)(9-2)(9-3)
Args:
L_CS_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの冷房顕熱負荷(MJ/h)
Q_star_trs_prt_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の非居室への熱移動(MJ/h)
region: 地域区分
L_CS_d_t_i: returns: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷
"""
H, C, M = get_season_array_d_t(region)
L_CS_d_t_i = L_CS_d_t_i[:5]
f = L_CS_d_t_i > 0
Cf = np.logical_and(C, f)
L_star_CS_d_t_i = np.zeros((5, 24 * 365))
L_star_CS_d_t_i[Cf] = np.clip(L_CS_d_t_i[Cf] + Q_star_trs_prt_d_t_i[Cf], 0, None)
return L_star_CS_d_t_i
def get_L_star_CL_d_t_i(L_CS_d_t_i, L_CL_d_t_i, region):
"""(10-1)(10-2)(10-3)
Args:
L_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの冷房顕熱負荷(MJ/h)
L_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの冷房潜熱負荷(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房潜熱負荷
"""
H, C, M = get_season_array_d_t(region)
L_CL_d_t_i = L_CL_d_t_i[:5]
L_CS_d_t_i = L_CS_d_t_i[:5]
f = L_CS_d_t_i > 0
Cf = np.logical_and(C, f)
L_star_CL_d_t_i = np.zeros((5, 24 * 365))
L_star_CL_d_t_i[Cf] = L_CL_d_t_i[Cf]
return L_star_CL_d_t_i
def get_Q_star_trs_prt_d_t_i(U_prt, A_prt_i, Theta_star_HBR_d_t, Theta_star_NR_d_t):
"""(11)
Args:
U_prt: 間仕切りの熱貫流率(W/(m2・K))
A_prt_i: 暖冷房区画iから見た非居室の間仕切りの面積(m2)
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
Theta_star_NR_d_t: 日付dの時刻tにおける負荷バランス時の非居室の室温(℃)
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の非居室への熱移動(MJ/h)
"""
return U_prt * A_prt_i[:5, np.newaxis] * (Theta_star_HBR_d_t - Theta_star_NR_d_t) * 3600 * 10 ** -6
# ============================================================================
# 9 熱源機
# ============================================================================
# ============================================================================
# 9.1 熱源機の入り口における空気温度・絶対湿度
# ============================================================================
def get_Theta_hs_in_d_t(Theta_NR_d_t):
"""(12)
Args:
Theta_NR_d_t: 日付dの時刻tにおける非居室の室温(℃)
Returns:
日付dの時刻tにおける熱源機の入口における空気温度(℃)
"""
return Theta_NR_d_t
def get_X_hs_in_d_t(X_NR_d_t):
"""(13)
Args:
X_NR_d_t: 日付dの時刻tにおける非居室の絶対湿度(kg/kg(DA))
Returns:
日付dの時刻tにおける熱源機の入口における絶対湿度(kg/kg(DA))
"""
return X_NR_d_t
# ============================================================================
# 9.2 熱源機の出口における空気温度・絶対湿度
# ============================================================================
def get_Theta_hs_out_d_t(VAV, Theta_req_d_t_i, V_dash_supply_d_t_i, L_star_H_d_t_i, L_star_CS_d_t_i, region, Theta_NR_d_t,
Theta_hs_out_max_H_d_t, Theta_hs_out_min_C_d_t):
"""(14-1)(14-2)(14-3)(14-4)(14-5)(14-6)
Args:
VAV: VAV有無
Theta_req_d_t_i: 日付dの時刻tにおける暖冷房区画iの熱源機の出口における要求空気温度(℃)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
L_star_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の暖房負荷(MJ/h)
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷(MJ/h)
region: 地域区分
Theta_NR_d_t: 日付dの時刻tにおける非居室の室温(℃)
Theta_hs_out_max_H_d_t: 日付dの時刻tにおける暖房時の熱源機の出口における空気温度の最高値(℃)
Theta_hs_out_min_C_d_t: 日付dの時刻tにおける冷房時の熱源機の出口における空気温度の最低値(℃)
Returns:
日付dの時刻tにおける熱源機の出口における空気温度(℃)
"""
H, C, M = get_season_array_d_t(region)
Theta_hs_out_d_t = np.zeros(24 * 365)
f1 = np.logical_and(H, np.sum(L_star_H_d_t_i[:5], axis=0) > 0)
f2 = np.logical_and(H, np.sum(L_star_H_d_t_i[:5], axis=0) <= 0)
f3 = np.logical_and(C, np.sum(L_star_CS_d_t_i[:5], axis=0) > 0)
f4 = np.logical_and(C, np.sum(L_star_CS_d_t_i[:5], axis=0) <= 0)
if VAV == False:
# 暖房期および冷房期 (14-1)
Theta_hs_out_d_t[f1] = np.sum(Theta_req_d_t_i[:5, f1] * V_dash_supply_d_t_i[:5, f1], axis=0) / \
np.sum(V_dash_supply_d_t_i[:5, f1], axis=0)
Theta_hs_out_d_t[f2] = Theta_NR_d_t[f2]
# 熱源機の出口における空気温度θ_(hs,out,d,t)は、暖房期においては、暖房時の熱源機の出口における
# 空気温度の最高値θ_(hs,out,max,H,d,t)を超える場合は、暖房時の熱源機の出口における空気温度の最高値θ_(hs,out,max,H,d,t)に等しい
Theta_hs_out_d_t[H] = np.clip(Theta_hs_out_d_t[H], None, Theta_hs_out_max_H_d_t[H])
# 冷房期 (14-2)
Theta_hs_out_d_t[f3] = np.sum(Theta_req_d_t_i[:5, f3] * V_dash_supply_d_t_i[:5, f3], axis=0) / \
np.sum(V_dash_supply_d_t_i[:5, f3], axis=0)
Theta_hs_out_d_t[f4] = Theta_NR_d_t[f4]
# 冷房期においては、冷房時の熱源機の出口における空気温度の最低値θ_(hs,out,min,C,d,t)を下回る場合は、
# 冷房時の熱源機の出口における空気温度の最低値θ_(hs,out,min,C,d,t)に等しい
Theta_hs_out_d_t[C] = np.clip(Theta_hs_out_d_t[C], Theta_hs_out_min_C_d_t[C], None)
# 中間期 (14-3)
Theta_hs_out_d_t[M] = Theta_NR_d_t[M]
else:
# 暖房期 (14-4)
Theta_hs_out_d_t[f1] = np.amax(Theta_req_d_t_i[:5, f1], axis=0)
Theta_hs_out_d_t[f2] = Theta_NR_d_t[f2]
# 熱源機の出口における空気温度θ_(hs,out,d,t)は、暖房期においては、暖房時の熱源機の出口における
# 空気温度の最高値θ_(hs,out,max,H,d,t)を超える場合は、暖房時の熱源機の出口における空気温度の最高値θ_(hs,out,max,H,d,t)に等しい
Theta_hs_out_d_t[H] = np.clip(Theta_hs_out_d_t[H], None, Theta_hs_out_max_H_d_t[H])
# 冷房期 (14-5)
Theta_hs_out_d_t[f3] = np.amin(Theta_req_d_t_i[:5, f3], axis=0)
Theta_hs_out_d_t[f4] = Theta_NR_d_t[f4]
# 冷房期においては、冷房時の熱源機の出口における空気温度の最低値θ_(hs,out,min,C,d,t)を下回る場合は、
# 冷房時の熱源機の出口における空気温度の最低値θ_(hs,out,min,C,d,t)に等しい
Theta_hs_out_d_t[C] = np.clip(Theta_hs_out_d_t[C], Theta_hs_out_min_C_d_t[C], None)
# 中間期 (14-6)
Theta_hs_out_d_t[M] = Theta_NR_d_t[M]
return Theta_hs_out_d_t
def get_X_hs_out_d_t(X_NR_d_t, X_req_d_t_i, V_dash_supply_d_t_i, X_hs_out_min_C_d_t, L_star_CL_d_t_i, region):
"""(15-1)(15-2)
Args:
X_NR_d_t: 日付dの時刻tにおける非居室の絶対湿度(kg/kg(DA))
X_req_d_t_i: 日付dの時刻tにおける暖冷房区画iの熱源機の出口における要求絶対湿度(kg/kg(DA))
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
X_hs_out_min_C_d_t: 日付dの時刻tにおける冷房時の熱源機の出口における絶対湿度の最低値(kg/kg(DA))
L_star_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房潜熱負荷(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける熱源機の出口における絶対湿度(kg/kg(DA))
"""
H, C, M = get_season_array_d_t(region)
X_hs_out_d_t = np.zeros(24 * 365)
# 暖房期および中間期 (15-1)
HM = np.logical_or(H, M)
X_hs_out_d_t[HM] = X_NR_d_t[HM]
# 冷房期 (15-2)
f1 = np.logical_and(C, np.sum(L_star_CL_d_t_i[:5], axis=0) > 0)
f2 = np.logical_and(C, np.sum(L_star_CL_d_t_i[:5], axis=0) <= 0)
X_hs_out_d_t[f1] = np.sum(X_req_d_t_i[:5, f1] * V_dash_supply_d_t_i[:5, f1], axis=0) / \
np.sum(V_dash_supply_d_t_i[:5, f1], axis=0)
X_hs_out_d_t[f2] = X_NR_d_t[f2]
# TODO: 仕様が曖昧なため、下限値の適用を冷房期に限定すべきか要確認
X_hs_out_d_t = np.clip(X_hs_out_d_t, X_hs_out_min_C_d_t, None)
return X_hs_out_d_t
# ============================================================================
# 9.3 最大出力時の熱源機の出口の空気温度・絶対湿度
# ============================================================================
def get_Theta_hs_out_max_H_d_t(Theta_star_hs_in_d_t, Q_hs_max_H_d_t, V_dash_supply_d_t_i):
"""(16)
Args:
Theta_star_hs_in_d_t: 日付dの時刻tにおける負荷バランス時の熱源機の入口における空気温度(℃)
Q_hs_max_H_d_t: 日付dの時刻tにおける1時間当たりの熱源機の最大暖房出力(MJ/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
Returns:
日付dの時刻tにおける暖房時の熱源機の出口における空気温度の最高値(℃)
"""
c_p_air = get_c_p_air()
rho_air = get_rho_air()
return np.clip(Theta_star_hs_in_d_t + ((Q_hs_max_H_d_t * 10 ** 6) / \
(c_p_air * rho_air * np.sum(V_dash_supply_d_t_i[:5, :], axis=0))), None, 45)
def get_Theta_hs_out_min_C_d_t(Theta_star_hs_in_d_t, Q_hs_max_CS_d_t, V_dash_supply_d_t_i):
"""(17)
Args:
Theta_star_hs_in_d_t: 日付dの時刻tにおける負荷バランス時の熱源機の入口における空気温度(℃)
Q_hs_max_CS_d_t: 日付dの時刻tにおける1時間当たりの熱源機の最大冷房顕熱出力(MJ/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
Returns:
日付dの時刻tにおける冷房時の熱源機の出口における空気温度の最低値(℃)
"""
c_p_air = get_c_p_air()
rho_air = get_rho_air()
return np.clip(Theta_star_hs_in_d_t - ((Q_hs_max_CS_d_t * 10 ** 6) / \
(c_p_air * rho_air * np.sum(V_dash_supply_d_t_i[:5, :], axis=0))), 15, None)
def get_X_hs_out_min_C_d_t(X_star_hs_in_d_t, Q_hs_max_CL_d_t, V_dash_supply_d_t_i):
"""(18)
Args:
X_star_hs_in_d_t: 日付dの時刻tにおける負荷バランス時の熱源機の入口における絶対湿度(kg/kg(DA))
Q_hs_max_CL_d_t: 日付dの時刻tにおける1時間当たりの熱源機の最大冷房潜熱出力(MJ/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
Returns:
日付dの時刻tにおける冷房時の熱源機の出口における絶対湿度の最低値(kg/kg(DA))
"""
rho_air = get_rho_air()
L_wtr = get_L_wtr()
return X_star_hs_in_d_t - ((Q_hs_max_CL_d_t * 10 ** 3) / (rho_air * L_wtr * np.sum(V_dash_supply_d_t_i[:5, :], axis=0)))
def get_Theta_star_hs_in_d_t(Theta_star_NR_d_t):
"""(19)
Args:
Theta_star_NR_d_t: 日付dの時刻tにおける負荷バランス時の非居室の室温(℃)
Returns:
日付dの時刻tにおける負荷バランス時の熱源機の入口における空気温度(℃)
"""
return Theta_star_NR_d_t
def get_X_star_hs_in_d_t(X_star_NR_d_t):
"""(20)
Args:
X_star_NR_d_t: 日付dの時刻tにおける負荷バランス時の非居室の絶対湿度(kg/kg(DA))
Returns:
日付dの時刻tにおける負荷バランス時の熱源機の入口における絶対湿度(kg/kg(DA))
"""
return X_star_NR_d_t
# ============================================================================
# 9.4 熱源機の出口における要求空気温度・絶対湿度
# ============================================================================
def get_Theta_req_d_t_i(Theta_sur_d_t_i, Theta_star_HBR_d_t, V_dash_supply_d_t_i, L_star_H_d_t_i, L_star_CS_d_t_i,
l_duct_i, region):
"""(21-1)(21-2)(21-3)
Args:
Theta_sur_d_t_i: 日付dの時刻tにおけるダクトiの周囲の空気温度(℃)
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
L_star_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の暖房負荷(MJ/h)
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
l_duct_i: ダクトの長さ(m)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの熱源機の出口における要求空気温度(℃)
"""
c_p_air = get_c_p_air()
rho_air = get_rho_air()
# ダクトiの線熱損失係数(W/(m・K))
phi_i = get_phi_i()
H, C, M = get_season_array_d_t(region)
Theta_req_d_t_i = np.zeros((5, 24 * 365))
e_exp_H = (phi_i[:, np.newaxis] * l_duct_i[:, np.newaxis] * 3600) / (c_p_air * rho_air * V_dash_supply_d_t_i[:, H])
# 暖房期 (21-1)
Theta_req_d_t_i[:, H] = Theta_sur_d_t_i[:, H] \
+ (Theta_star_HBR_d_t[H] + (L_star_H_d_t_i[:, H] * 10 ** 6) \
/ (c_p_air * rho_air * V_dash_supply_d_t_i[:, H]) - Theta_sur_d_t_i[:, H]) \
* np.exp(e_exp_H)
# 暖冷房区画iの熱源機の出口における要求空気温度が負荷バランス時の居室の室温を下回る場合
Theta_req_d_t_i[:, H] = np.clip(Theta_req_d_t_i[:, H], Theta_star_HBR_d_t[H], None)
# 冷房期 (21-2)
e_exp_C = (phi_i[:, np.newaxis] * l_duct_i[:, np.newaxis] * 3600) / (c_p_air * rho_air * V_dash_supply_d_t_i[:, C])
Theta_req_d_t_i[:, C] = Theta_sur_d_t_i[:, C] \
- (Theta_sur_d_t_i[:, C] - Theta_star_HBR_d_t[C] + (L_star_CS_d_t_i[:, C] * 10 ** 6) \
/ (c_p_air * rho_air * V_dash_supply_d_t_i[:, C])) \
* np.exp(e_exp_C)
# 暖冷房区画iの熱源機の出口における要求空気温度が負荷バランス時の居室の室温を上回る場合
Theta_req_d_t_i[:, C] = np.clip(Theta_req_d_t_i[:, C], None, Theta_star_HBR_d_t[C])
# 中間期 (21-3)
Theta_req_d_t_i[:, M] = Theta_star_HBR_d_t[M]
return Theta_req_d_t_i
def get_X_req_d_t_i(X_star_HBR_d_t, L_star_CL_d_t_i, V_dash_supply_d_t_i, region):
"""(22-1)(22-2)
Args:
X_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の絶対湿度(kg/kg(DA))
L_star_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房潜熱負荷(MJ/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの熱源機の出口における要求絶対湿度(kg/kg(DA))
"""
rho_air = get_rho_air()
L_wtr = get_L_wtr()
H, C, M = get_season_array_d_t(region)
# 暖房期および中間期 (22-1)
HM = np.logical_or(H, M)
X_req_d_t_i = np.zeros((5, 24 * 365))
X_req_d_t_i[:, HM] = X_star_HBR_d_t[HM]
# 冷房期 (22-2)
X_req_d_t_i[:, C] = X_star_HBR_d_t[C] - (L_star_CL_d_t_i[:, C] * 10 ** 3) / (rho_air * L_wtr * V_dash_supply_d_t_i[:, C])
return X_req_d_t_i
# ============================================================================
# 9.5 熱源機の最大出力
# ============================================================================
# ============================================================================
# 9.5.1 熱源機の最大暖房出力
# ============================================================================
def get_Q_hs_max_H_d_t(q_hs_rtd_H, C_df_H_d_t):
"""(23)
Args:
q_hs_rtd_H: 熱源機の定格暖房能力 (W)
C_df_H_d_t: 日付dの時刻tにおけるデフロストに関する暖房出力補正係数(-)
Returns:
熱源機の最大暖房出力 (MJ/h)
"""
alpha_max_H = get_alpha_max_H()
Q_hs_max_H_d_t = np.zeros(24 * 365)
if q_hs_rtd_H is not None:
Q_hs_max_H_d_t = q_hs_rtd_H * alpha_max_H * C_df_H_d_t * 3600 * 10 ** -6
return Q_hs_max_H_d_t
def get_alpha_max_H():
""":return: 定格暖房能力に対する最大暖房能力の比(-)"""
return 1.00
def get_C_df_H_d_t(Theta_ex_d_t, h_ex_d_t):
"""(24-1)(24-2)
Args:
Theta_ex_d_t: 日付dの時刻tにおける外気温度(℃)
h_ex_d_t: 日付dの時刻tにおける外気相対湿度(%)
Returns:
日付dの時刻tにおけるデフロストに関する暖房出力補正係数(-)
"""
C_df_H_d_t = np.ones(24 * 365)
C_df_H_d_t[np.logical_and(Theta_ex_d_t < 5, h_ex_d_t > 80)] = 0.77
return C_df_H_d_t
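# Illustrative sketch (not part of the standard's text): the defrost correction
# factor C_df_H drops to 0.77 only when the outdoor air is simultaneously colder
# than 5 degC and more humid than 80 %RH; all other hours keep the factor at 1.0.
# The three-value pattern below is an arbitrary example tiled to a full year.
def _example_C_df_H_d_t():
    Theta_ex_d_t = np.tile(np.array([2.0, 2.0, 10.0]), 2920)  # 3 x 2920 = 8760 hours
    h_ex_d_t = np.tile(np.array([90.0, 70.0, 90.0]), 2920)
    C_df_H_d_t = get_C_df_H_d_t(Theta_ex_d_t, h_ex_d_t)
    return C_df_H_d_t[:3]  # expected: array([0.77, 1.0, 1.0])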
# ============================================================================
# 9.5.2 熱源機の最大冷房出力
# ============================================================================
# 1時間当たりの熱源機の最大冷房顕熱出力 (25)
def get_Q_hs_max_CS_d_t(Q_hs_max_C_d_t, SHF_dash_d_t):
"""(25)
Args:
Q_hs_max_C_d_t: 日付dの時刻tにおける1時間当たりの熱源機の最大冷房出力(MJ/h)
SHF_dash_d_t: 日付dの時刻tにおける冷房負荷補正顕熱比(-)
Returns:
日付dの時刻tにおける1時間当たりの熱源機の最大冷房顕熱出力(MJ/h)
"""
return Q_hs_max_C_d_t * SHF_dash_d_t
# 1時間当たりの熱源機の最大冷房潜熱出力 (26)
def get_Q_hs_max_CL_d_t(Q_hs_max_C_d_t, SHF_dash_d_t, L_star_dash_CL_d_t):
"""(26)
Args:
Q_hs_max_C_d_t: 日付dの時刻tにおける1時間当たりの熱源機の最大冷房出力(MJ/h)
SHF_dash_d_t: 日付dの時刻tにおける冷房負荷補正顕熱比(-)
L_star_dash_CL_d_t: 日付dの時刻tにおける補正冷房潜熱負荷(MJ/h)
Returns:
日付dの時刻tにおける1時間当たりの熱源機の最大冷房潜熱出力(MJ/h)
"""
return np.min([Q_hs_max_C_d_t * (1.0 - SHF_dash_d_t), L_star_dash_CL_d_t], axis=0)
# 最大冷房出力 [MJ/h] (27)
def get_Q_hs_max_C_d_t(q_hs_rtd_C):
"""(27)
Args:
q_hs_rtd_C: 熱源機の定格冷房能力(W)
Returns:
最大冷房出力 [MJ/h]
"""
alpha_max_C = get_alpha_max_C()
Q_hs_max_C_d_t = np.zeros(24 * 365)
if q_hs_rtd_C is not None:
Q_hs_max_C_d_t = q_hs_rtd_C * alpha_max_C * 3600 * 10 ** -6
return Q_hs_max_C_d_t
def get_alpha_max_C():
""":return: 定格冷房能力に対する最大冷房能力の比(-)"""
return 1.11
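# Worked example (assumed 5.6 kW rated cooling capacity, not a value taken from
# the standard): equation (27) converts the boosted rated capacity from W to MJ/h,
# so Q_hs_max_C = 5600 W x 1.11 x 3600 s/h x 10^-6 = 22.38 MJ/h.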
# 冷房負荷補正顕熱比 (28)
def get_SHF_dash_d_t(L_star_CS_d_t, L_star_dash_C_d_t):
"""(28)
Args:
L_star_CS_d_t: 日付dの時刻tにおける1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
L_star_dash_C_d_t: 日付dの時刻tにおける補正冷房負荷(MJ/h)
Returns:
日付dの時刻tにおける冷房負荷補正顕熱比(-)
"""
SHF_dash_d_t = np.zeros(24 * 365)
f = L_star_dash_C_d_t > 0
SHF_dash_d_t[f] = L_star_CS_d_t[f] / L_star_dash_C_d_t[f]
return SHF_dash_d_t
# 1時間当たりの補正冷房負荷 (29)
def get_L_star_dash_C_d_t(L_star_CS_d_t, L_star_dash_CL_d_t):
"""(29)
Args:
L_star_CS_d_t: 日付dの時刻tにおける1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
L_star_dash_CL_d_t: 日付dの時刻tにおける補正冷房潜熱負荷(MJ/h)
Returns:
日付dの時刻tにおける時間当たりの補正冷房負荷(MJ/h)
"""
return L_star_CS_d_t + L_star_dash_CL_d_t
def get_L_star_dash_CL_d_t(L_star_CL_max_d_t, L_star_CL_d_t):
"""(30)
Args:
L_star_CL_max_d_t: 日付dの時刻tにおける最大冷房潜熱負荷(MJ/h)
L_star_CL_d_t: 日付dの時刻tにおける1時間当たりの熱取得を含む負荷バランス時の冷房潜熱負荷(MJ/h)
Returns:
日付dの時刻tにおける補正冷房潜熱負荷(MJ/h)
"""
return np.minimum(L_star_CL_max_d_t, L_star_CL_d_t)
# 1時間当たりの最大冷房潜熱負荷 (MJ/h)
def get_L_star_CL_max_d_t(L_star_CS_d_t):
"""(31)
Args:
L_star_CS_d_t: 日付dの時刻tにおける1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
Returns:
日付dの時刻tにおける最大冷房潜熱負荷(MJ/h)
"""
# 冷房負荷最小顕熱比率 [-]
SHF_L_min_C = get_SHF_L_min_C()
return L_star_CS_d_t * ((1.0 - SHF_L_min_C) / SHF_L_min_C)
def get_SHF_L_min_C():
""":return: 冷房負荷最小顕熱比率 (-)"""
return 0.4
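# Note (illustrative): with SHF_L_min_C = 0.4, equation (31) caps the latent load at
# (1 - 0.4) / 0.4 = 1.5 times the balanced sensible cooling load L_star_CS_d_t.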
def get_L_star_CS_d_t(L_star_CS_d_t_i):
"""(32)
Args:
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
Returns:
日付dの時刻tにおける1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
"""
return np.sum(L_star_CS_d_t_i[:5, :], axis=0)
def get_L_star_CL_d_t(L_star_CL_d_t_i):
"""(33)
Args:
L_star_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房潜熱負荷(MJ/h)
Returns:
日付dの時刻tにおける1時間当たりの熱取得を含む負荷バランス時の冷房潜熱負荷(MJ/h)
"""
return np.sum(L_star_CL_d_t_i[:5, :], axis=0)
# ============================================================================
# 9.6 熱源機の風量
# ============================================================================
def get_V_hs_supply_d_t(V_supply_d_t_i):
"""(34)
Args:
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
Returns:
日付dの時刻tにおける熱源機の風量(m3/h)
"""
return np.sum(V_supply_d_t_i[:5, :], axis=0)
def get_V_hs_vent_d_t(V_vent_g_i, general_ventilation):
"""(35-1)(35-2)
Args:
V_vent_g_i: 暖冷房区画iの全般換気量(m3/h)
general_ventilation: 全般換気の機能の有無
Returns:
日付dの時刻tにおける熱源機の風量のうちの全般換気分(m3/h)
"""
# (35-2)
V_hs_vent_d_t = np.zeros(24 * 365)
# 当該システムが全般換気の機能を有する場合 (35-1)
if general_ventilation == True:
V_vent_g = np.sum(V_vent_g_i[:5], axis=0)
V_hs_vent_d_t = np.repeat(V_vent_g, 24 * 365)
elif general_ventilation == False:
pass
else:
raise ValueError(general_ventilation)
return V_hs_vent_d_t
# ============================================================================
# 9.7 VAV調整前の熱源機の風量
# ============================================================================
def get_V_dash_hs_supply_d_t(V_hs_min, V_hs_dsgn_H, V_hs_dsgn_C, Q_hs_rtd_H, Q_hs_rtd_C, Q_hat_hs_d_t, region):
"""(36-1)(36-2)(36-3)
Args:
V_hs_min: 熱源機の最低風量(m3/h)
V_hs_dsgn_H: 暖房時の設計風量(m3/h)
V_hs_dsgn_C: 冷房時の設計風量(m3/h)
Q_hs_rtd_H: 熱源機の暖房時の定格出力(MJ/h)
Q_hs_rtd_C: 熱源機の冷房時の定格出力(MJ/h)
Q_hat_hs_d_t: 日付dの時刻tにおける1時間当たりの熱源機の風量を計算するための熱源機の出力(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおけるVAV調整前の熱源機の風量(m3/h)
"""
H, C, M = get_season_array_d_t(region)
V_dash_hs_supply_d_t = np.zeros(24 * 365)
# 暖房期:熱源機の出力が負の値の場合
f1 = np.logical_and(H, Q_hat_hs_d_t < 0)
# 暖房期:熱源機の出力が正で出力が定格出力を超えない場合
if Q_hs_rtd_H is not None:
f2 = np.logical_and(H, np.logical_and(0 <= Q_hat_hs_d_t, Q_hat_hs_d_t < Q_hs_rtd_H))
# 暖房期:出力が定格出力を超えた場合
if Q_hs_rtd_H is not None:
f3 = np.logical_and(H, Q_hat_hs_d_t >= Q_hs_rtd_H)
# 冷房期:熱源機の出力が負の値の場合
f4 = np.logical_and(C, Q_hat_hs_d_t < 0)
# 冷房期:熱源機の出力が正で出力が定格出力を超えない場合
if Q_hs_rtd_C is not None:
f5 = np.logical_and(C, np.logical_and(0 <= Q_hat_hs_d_t, Q_hat_hs_d_t < Q_hs_rtd_C))
# 冷房期:出力が定格出力を超えた場合
if Q_hs_rtd_C is not None:
f6 = np.logical_and(C, Q_hat_hs_d_t >= Q_hs_rtd_C)
# 暖房期 (36-1)
# 熱源機の出力が負の値の場合
V_dash_hs_supply_d_t[f1] = V_hs_min
# 熱源機の出力が正で出力が定格出力を超えない場合
if Q_hs_rtd_H is not None:
V_dash_hs_supply_d_t[f2] = (V_hs_dsgn_H - V_hs_min) / Q_hs_rtd_H * Q_hat_hs_d_t[f2] + V_hs_min
# 出力が定格出力を超えた場合
if V_hs_dsgn_H is not None:
V_dash_hs_supply_d_t[f3] = V_hs_dsgn_H
# 冷房期 (36-2)
# 熱源機の出力が負の値の場合
V_dash_hs_supply_d_t[f4] = V_hs_min
# 熱源機の出力が正で出力が定格出力を超えない場合
if Q_hs_rtd_C is not None:
V_dash_hs_supply_d_t[f5] = (V_hs_dsgn_C - V_hs_min) / Q_hs_rtd_C * Q_hat_hs_d_t[f5] + V_hs_min
# 出力が定格出力を超えた場合
if V_hs_dsgn_C is not None:
V_dash_hs_supply_d_t[f6] = V_hs_dsgn_C
# 中間期 (36-3)
V_dash_hs_supply_d_t[M] = V_hs_min
return V_dash_hs_supply_d_t
def get_Q_hs_rtd_H(q_hs_rtd_H):
"""(37)
Args:
q_hs_rtd_H: 熱源機の定格暖房能力(W)
Returns:
暖房時の熱源機の定格出力(MJ/h)
"""
if q_hs_rtd_H is not None:
return q_hs_rtd_H * 3600 * 10 ** -6
else:
return None
def get_Q_hs_rtd_C(q_hs_rtd_C):
"""(38)
Args:
q_hs_rtd_C: 熱源機の定格冷房能力(W)
Returns:
冷房時の熱源機の定格出力(MJ/h)
"""
if q_hs_rtd_C is not None:
return q_hs_rtd_C * 3600 * 10 ** -6
else:
return None
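# Worked example (assumed 6.0 kW rated heating capacity): equations (37)/(38) are
# simple W -> MJ/h conversions, so get_Q_hs_rtd_H(6000) = 6000 * 3600 * 1e-6 = 21.6 MJ/h.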
def get_V_hs_min(V_vent_g_i):
"""(39)
Args:
V_vent_g_i: 暖冷房区画iの全般換気量(m3/h)
Returns:
熱源機の最低風量(m3/h)
"""
return np.sum(V_vent_g_i[:5], axis=0)
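# Illustrative note: with the reference general-ventilation volumes of Table 2
# (60 + 20 + 40 + 20 + 20 m3/h), equation (39) would give V_hs_min = 160 m3/h;
# an actual dwelling first scales each V_vent_g_i by its zone floor area (eq. 62).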
def calc_Q_hat_hs_d_t(Q, A_A, V_vent_l_d_t, V_vent_g_i, mu_H, mu_C, J_d_t, q_gen_d_t, n_p_d_t, q_p_H, q_p_CS, q_p_CL, X_ex_d_t, w_gen_d_t, Theta_ex_d_t, L_wtr, region):
"""(40-1a)(40-1b)(40-2a)(40-2b)(40-2c)(40-3)
Args:
Q: 当該住戸の熱損失係数(W/(m2・K))
A_A: 床面積の合計(m2)
V_vent_l_d_t: 日付dの時刻tにおける局所換気量(m3/h)
V_vent_g_i: 暖冷房区画iの全般換気量(m3/h)
mu_H: 当該住戸の暖房期の日射取得係数((W/m2)/(W/m2))
mu_C: 当該住戸の冷房期の日射取得係数((W/m2)/(W/m2))
J_d_t: 日付dの時刻tにおける水平面全天日射量(W/m2)
q_gen_d_t: 日付dの時刻tにおける内部発熱(W)
n_p_d_t: 日付dの時刻tにおける在室人数(人)
q_p_H: 暖房期における人体からの1人当たりの顕熱発熱量(W/人)
q_p_CS: 冷房期における人体からの1人当たりの顕熱発熱量(W/人)
q_p_CL: 冷房期における人体からの1人当たりの潜熱発熱量(W/人)
X_ex_d_t: 日付dの時刻tにおける外気の絶対湿度(kg/kg(DA))
w_gen_d_t: 日付dの時刻tにおける内部発湿
Theta_ex_d_t: 日付dの時刻tにおける外気温度(℃)
L_wtr: 水の蒸発潜熱(kJ/kg)
region: 地域区分
Returns:
日付dの時刻tにおける1時間当たりの熱源機の風量を計算するための熱源機の出力(MJ/h)
"""
H, C, M = get_season_array_d_t(region)
c_p_air = get_c_p_air()
rho_air = get_rho_air()
Theta_set_H = get_Theta_set_H()
Theta_set_C = get_Theta_set_C()
X_set_C = get_X_set_C()
Q_hat_hs_d_t = np.zeros(24 * 365)
Q_hat_hs_H_d_t = np.zeros(24 * 365)
Q_hat_hs_CS_d_t = np.zeros(24 * 365)
Q_hat_hs_CL_d_t = np.zeros(24 * 365)
# 暖房期 (40-1b)
if mu_H is not None:
Q_hat_hs_H_d_t[H] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[H] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_set_H - Theta_ex_d_t[H]) \
- mu_H * A_A * J_d_t[H] - q_gen_d_t[H] - n_p_d_t[H] * q_p_H) * 3600 * 10 ** -6
# (40-1a)
Q_hat_hs_d_t[H] = np.clip(Q_hat_hs_H_d_t[H], 0, None)
# 冷房期 (40-2b)
Q_hat_hs_CS_d_t[C] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_ex_d_t[C] - Theta_set_C) \
+ mu_C * A_A * J_d_t[C] + q_gen_d_t[C] + n_p_d_t[C] * q_p_CS) * 3600 * 10 ** -6
# (40-2c)
Q_hat_hs_CL_d_t[C] = ((rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5])) * (X_ex_d_t[C] - X_set_C) * 10 ** 3 + w_gen_d_t[C]) \
* L_wtr + n_p_d_t[C] * q_p_CL * 3600) * 10 ** -6
# (40-2a)
Q_hat_hs_d_t[C] = np.clip(Q_hat_hs_CS_d_t[C], 0, None) + np.clip(Q_hat_hs_CL_d_t[C], 0, None)
# 中間期 (40-3)
Q_hat_hs_d_t[M] = 0
return Q_hat_hs_d_t
# ============================================================================
# 10 吹き出し口
# ============================================================================
# ============================================================================
# 10.1 吹き出し空気温度
# ============================================================================
def get_Thata_supply_d_t_i(Theta_sur_d_t_i, Theta_hs_out_d_t, Theta_star_HBR_d_t, l_duct_i,
V_supply_d_t_i, L_star_H_d_t_i, L_star_CS_d_t_i, region):
"""(41-1)(41-2)(41-3)
Args:
Theta_sur_d_t_i: 日付dの時刻tにおけるダクトiの周囲の空気温度(℃)
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
l_duct_i: ダクトiの長さ(m)
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
L_star_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の暖房負荷(MJ/h)
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷(MJ/h)
Theta_hs_out_d_t: 日付dの時刻tにおける熱源機の出口における空気温度(℃)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの吹き出し温度(℃)
"""
H, C, M = get_season_array_d_t(region)
c_p_air = get_c_p_air()
rho_air = get_rho_air()
phi_i = get_phi_i()
Thata_supply_d_t_i = np.zeros((5, 24 * 365))
f1 = np.logical_and(H, np.sum(L_star_H_d_t_i[:5, :], axis=0) > 0)
f2 = np.logical_and(H, np.sum(L_star_H_d_t_i[:5, :], axis=0) <= 0)
f3 = np.logical_and(C, np.sum(L_star_CS_d_t_i[:5, :], axis=0) > 0)
f4 = np.logical_and(C, np.sum(L_star_CS_d_t_i[:5, :], axis=0) <= 0)
# 暖房期 (41-1)
e_exp_H = -(phi_i[:, np.newaxis] * l_duct_i[:, np.newaxis] * 3600) / (c_p_air * rho_air * V_supply_d_t_i[:, f1])
Thata_supply_d_t_i[:, f1] = Theta_sur_d_t_i[:, f1] + (Theta_hs_out_d_t[f1] - Theta_sur_d_t_i[:, f1]) \
* np.exp(e_exp_H)
Thata_supply_d_t_i[:, f2] = Theta_star_HBR_d_t[f2]
# 冷房期 (41-2)
e_exp_C = -(phi_i[:, np.newaxis] * l_duct_i[:, np.newaxis] * 3600) / (c_p_air * rho_air * V_supply_d_t_i[:, f3])
Thata_supply_d_t_i[:, f3] = Theta_sur_d_t_i[:, f3] + (Theta_hs_out_d_t[f3] - Theta_sur_d_t_i[:, f3]) \
* np.exp(e_exp_C)
Thata_supply_d_t_i[:, f4] = Theta_star_HBR_d_t[f4]
# 中間期 (41-3)
Thata_supply_d_t_i[:, M] = Theta_star_HBR_d_t[M]
return Thata_supply_d_t_i
# ============================================================================
# 10.2 吹き出し絶対湿度
# ============================================================================
def get_X_supply_d_t_i(X_star_HBR_d_t, X_hs_out_d_t, L_star_CL_d_t_i, region):
"""(42-1)(42-2)
Args:
X_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の絶対湿度(kg/kg(DA))
X_hs_out_d_t: 日付dの時刻tにおける熱源機の出口における絶対湿度(kg/kg(DA))
L_star_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房潜熱負荷(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの吹き出し絶対湿度(kg/kg(DA))
"""
H, C, M = get_season_array_d_t(region)
X_supply_d_t_i = np.zeros((5, 24 * 365))
# 暖房期および中間期 (42-1)
HM = np.logical_or(H, M)
X_supply_d_t_i[:, HM] = X_star_HBR_d_t[HM]
# 冷房期 (42-2)
f1 = np.logical_and(C, np.sum(L_star_CL_d_t_i[:5, :], axis=0) > 0)
f2 = np.logical_and(C, np.sum(L_star_CL_d_t_i[:5, :], axis=0) <= 0)
X_supply_d_t_i[:, f1] = X_hs_out_d_t[f1]
X_supply_d_t_i[:, f2] = X_star_HBR_d_t[f2]
return X_supply_d_t_i
# ============================================================================
# 10.3 吹き出し風量
# ============================================================================
def get_V_supply_d_t_i(L_star_H_d_t_i, L_star_CS_d_t_i, Theta_sur_d_t_i, l_duct_i, Theta_star_HBR_d_t, V_vent_g_i,
V_dash_supply_d_t_i, VAV, region, Theta_hs_out_d_t):
"""(43-1)(43-2)(43-3)(43-4)(43-5)
Args:
L_star_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の暖房負荷(MJ/h)
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱取得を含む負荷バランス時の冷房顕熱負荷(MJ/h)
Theta_sur_d_t_i: 日付dの時刻tにおけるダクトiの周囲の空気温度(℃)
l_duct_i: ダクトiの長さ(m)
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
V_vent_g_i: 暖冷房区画iの全般換気量(m3/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
VAV: VAV有無
region: 地域区分
Theta_hs_out_d_t: 日付dの時刻tにおける熱源機の出口における空気温度(℃)
Returns:
日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
"""
H, C, M = get_season_array_d_t(region)
c_p_air = get_c_p_air()
rho_air = get_rho_air()
phi_i = get_phi_i()
V_supply_d_t_i = np.zeros((5, 24 * 365))
V_vent_g_i = np.reshape(V_vent_g_i, (5, 1))
V_vent_g_i = V_vent_g_i.repeat(24 * 365, axis=1)
if VAV == True:
# 暖房期 (43-1)
f1 = np.logical_and(H, np.logical_and(Theta_hs_out_d_t > Theta_star_HBR_d_t,
np.sum(L_star_H_d_t_i[:5, :], axis=0) > 0))
term2_H = (Theta_hs_out_d_t[f1] - Theta_sur_d_t_i[:, f1]) * phi_i[:, np.newaxis] * l_duct_i[:, np.newaxis] * 3600
V_supply_d_t_i[:, f1] = (L_star_H_d_t_i[:, f1] * 10 ** 6 + term2_H) / \
(c_p_air * rho_air * (Theta_hs_out_d_t[f1] - Theta_star_HBR_d_t[f1]))
f2 = np.logical_and(H, np.logical_or(Theta_hs_out_d_t <= Theta_star_HBR_d_t, np.sum(L_star_H_d_t_i[:5, :], axis=0) <= 0))
V_supply_d_t_i[:, f2] = V_vent_g_i[:, f2]
# 冷房期 (43-2)
f3 = np.logical_and(C, np.logical_and(Theta_hs_out_d_t < Theta_star_HBR_d_t,
np.sum(L_star_CS_d_t_i[:5, :], axis=0) > 0))
term2_C = (Theta_sur_d_t_i[:, f3] - Theta_hs_out_d_t[f3]) * phi_i[:, np.newaxis] * l_duct_i[:, np.newaxis] * 3600
V_supply_d_t_i[:, f3] = (L_star_CS_d_t_i[:, f3] * 10 ** 6 + term2_C) / \
(c_p_air * rho_air * (Theta_star_HBR_d_t[f3] - Theta_hs_out_d_t[f3]))
f4 = np.logical_and(C, np.logical_or(Theta_hs_out_d_t >= Theta_star_HBR_d_t,
np.sum(L_star_CS_d_t_i[:5, :], axis=0) <= 0))
V_supply_d_t_i[:, f4] = V_vent_g_i[:, f4]
# 中間期 (43-3)
V_supply_d_t_i[:, M] = V_vent_g_i[:, M]
elif VAV == False:
# 暖房期および冷房期 (43-4)
HC = np.logical_or(H, C)
V_supply_d_t_i[:, HC] = V_dash_supply_d_t_i[:, HC]
# 中間期 (43-5)
V_supply_d_t_i[:, M] = V_vent_g_i[:, M]
else:
raise ValueError(VAV)
# 吹き出し風量V_(supply,d,t,i)は、VAV調整前の吹き出し風量V_(supply,d,t,i)^'を上回る場合はVAV調整前の \
# 吹き出し風量V_(supply,d,t,i)^'に等しいとし、全般換気量V_(vent,g,i)を下回る場合は全般換気量V_(vent,g,i)に等しいとする
V_supply_d_t_i = np.clip(V_supply_d_t_i, V_vent_g_i, V_dash_supply_d_t_i)
return V_supply_d_t_i
# ============================================================================
# 10.4 VAV調整前の吹き出し風量
# ============================================================================
def get_V_dash_supply_d_t_i(r_supply_des_i, V_dash_hs_supply_d_t, V_vent_g_i):
"""(44)
Args:
r_supply_des_i: 暖冷房区画iの風量バランス(-)
V_dash_hs_supply_d_t: 日付dの時刻tにおけるVAV調整前の熱源機の風量(m3/h)
V_vent_g_i: 暖冷房区画iの全般換気量(m3/h)
Returns:
日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
"""
return np.maximum(r_supply_des_i[:5, np.newaxis] * V_dash_hs_supply_d_t, V_vent_g_i[:5, np.newaxis])
def get_r_supply_des_i(A_HCZ_i):
"""(45)
Args:
A_HCZ_i: 暖冷房区画iの床面積(m2)
Returns:
暖冷房区画iの風量バランス(-)
"""
return A_HCZ_i / np.sum(A_HCZ_i[:5])
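# Illustrative sketch (the floor areas below are arbitrary, not prescribed values):
# the design supply-air ratios of equation (45) are simple area fractions and sum to 1.
def _example_r_supply_des_i():
    A_HCZ_i = np.array([29.81, 16.56, 13.25, 10.76, 10.77])
    r_supply_des_i = get_r_supply_des_i(A_HCZ_i)
    assert np.isclose(np.sum(r_supply_des_i), 1.0)
    return r_supply_des_i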
# ============================================================================
# 11 暖冷房区画
# ============================================================================
# ============================================================================
# 11.1 実際の居室の室温・絶対湿度
# ============================================================================
def get_Theta_HBR_d_t_i(Theta_star_HBR_d_t, V_supply_d_t_i, Theta_supply_d_t_i, U_prt, A_prt_i, Q, A_HCZ_i, L_star_H_d_t_i, L_star_CS_d_t_i, region):
"""(46-1)(46-2)(46-3)
Args:
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
Theta_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し温度(℃)
U_prt: 間仕切りの熱貫流率(W/(m2・K))
A_prt_i: 暖冷房区画iから見た非居室の間仕切りの面積(m2)
Q: 当該住戸の熱損失係数(W/(m2・K))
A_HCZ_i: 暖冷房区画iの床面積(m2)
L_star_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の暖房負荷(MJ/h)
L_star_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける暖冷房区画iの実際の居室の室温(℃)
"""
H, C, M = get_season_array_d_t(region)
c_p_air = get_c_p_air()
rho_air = get_rho_air()
Theta_HBR_d_t_i = np.zeros((5, 24 * 365))
# 暖房期 (46-1)
Theta_HBR_d_t_i[:, H] = Theta_star_HBR_d_t[H] + (c_p_air * rho_air * V_supply_d_t_i[:, H] * \
(Theta_supply_d_t_i[:, H] - Theta_star_HBR_d_t[H]) - L_star_H_d_t_i[:, H] * 10 ** 6) / \
(c_p_air * rho_air * V_supply_d_t_i[:, H] + (U_prt * A_prt_i[:, np.newaxis] + Q * A_HCZ_i[:, np.newaxis]) * 3600)
# 暖冷房区画iの実際の居室の室温θ_(HBR,d,t,i)は、暖房期において負荷バランス時の居室の室温θ_(HBR,d,t)^*を下回る場合、
# 負荷バランス時の居室の室温θ_(HBR,d,t)^*に等しい
Theta_HBR_d_t_i[:, H] = np.clip(Theta_HBR_d_t_i[:, H], Theta_star_HBR_d_t[H], None)
# 冷房期 (46-2)
Theta_HBR_d_t_i[:, C] = Theta_star_HBR_d_t[C] - (c_p_air * rho_air * V_supply_d_t_i[:, C] * \
(Theta_star_HBR_d_t[C] - Theta_supply_d_t_i[:, C]) - L_star_CS_d_t_i[:, C] * 10 ** 6) / \
(c_p_air * rho_air * V_supply_d_t_i[:, C] + (U_prt * A_prt_i[:, np.newaxis] + Q * A_HCZ_i[:, np.newaxis]) * 3600)
# 冷房期において負荷バランス時の居室の室温θ_(HBR,d,t)^*を上回る場合、負荷バランス時の居室の室温θ_(HBR,d,t)^*に等しい
Theta_HBR_d_t_i[:, C] = np.clip(Theta_HBR_d_t_i[:, C], None, Theta_star_HBR_d_t[C])
# 中間期 (46-3)
Theta_HBR_d_t_i[:, M] = Theta_star_HBR_d_t[M]
return Theta_HBR_d_t_i
def get_X_HBR_d_t_i(X_star_HBR_d_t):
"""(47)
Args:
X_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の絶対湿度(kg/kg(DA))
Returns:
日付dの時刻tにおける暖冷房区画iの実際の居室の絶対湿度(kg/kg(DA))
"""
X_star_HBR_d_t_i = np.tile(X_star_HBR_d_t, (5, 1))
return X_star_HBR_d_t_i
# ============================================================================
# 11.2 実際の非居室の室温・絶対湿度
# ============================================================================
def get_Theta_NR_d_t(Theta_star_NR_d_t, Theta_star_HBR_d_t, Theta_HBR_d_t_i, A_NR, V_vent_l_NR_d_t, V_dash_supply_d_t_i, V_supply_d_t_i, U_prt, A_prt_i, Q):
"""(48a)(48b)(48c)(48d)
Args:
Theta_star_NR_d_t: 日付dの時刻tにおける負荷バランス時の非居室の室温(℃)
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
Theta_HBR_d_t_i: 日付dの時刻tにおける暖冷房区画iの実際の居室の室温(℃)
A_NR: 非居室の床面積(m2)
V_vent_l_NR_d_t: 日付dの時刻tにおける非居室の局所換気量(m3/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
V_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iの吹き出し風量(m3/h)
U_prt: 間仕切りの熱貫流率(W/(m2・K))
A_prt_i: 暖冷房区画iから見た非居室の間仕切りの面積(m2)
Q: 当該住戸の熱損失係数(W/(m2・K))
Returns:
日付dの時刻tにおける実際の非居室の室温
"""
c_p_air = get_c_p_air()
rho_air = get_rho_air()
# (48d)
k_dash_d_t_i = c_p_air * rho_air * (V_dash_supply_d_t_i / 3600) + U_prt * A_prt_i[:, np.newaxis]
# (48c)
k_prt_d_t_i = c_p_air * rho_air * (V_supply_d_t_i / 3600) + U_prt * A_prt_i[:, np.newaxis]
# (48b)
k_evp_d_t = (Q - 0.35 * 0.5 * 2.4) * A_NR + c_p_air * rho_air * (V_vent_l_NR_d_t / 3600)
# (48a)
Theta_NR_d_t = Theta_star_NR_d_t + (-1 * np.sum(k_dash_d_t_i[:5] * (Theta_star_HBR_d_t - Theta_star_NR_d_t), axis=0) + \
np.sum(k_prt_d_t_i[:5] * (Theta_HBR_d_t_i[:5] - Theta_star_NR_d_t), axis=0)) / \
(k_evp_d_t + np.sum(k_prt_d_t_i[:5], axis=0))
return Theta_NR_d_t
def get_X_NR_d_t(X_star_NR_d_t):
"""(49)
Args:
X_star_NR_d_t: 日付dの時刻tにおける非居室の負荷バランス時の絶対湿度(kg/kg(DA))
Returns:
日付dの時刻tにおける実際の非居室の絶対湿度(kg/kg(DA))
"""
return X_star_NR_d_t
# ============================================================================
# 11.3 負荷バランス時の居室の室温・絶対湿度
# ============================================================================
def get_Theta_star_HBR_d_t(Theta_ex_d_t, region):
"""(50-1)(50-2)(50-3)
Args:
Theta_ex_d_t: 日付dの時刻tにおける外気温度(℃)
region: 地域区分
Returns:
日付dの時刻tにおける負荷バランス時の居室の室温(℃)
"""
H, C, M = get_season_array_d_t(region)
Theta_set_H = get_Theta_set_H()
Theta_set_C = get_Theta_set_C()
Theta_star_HBR_d_t = np.zeros(24 * 365)
# 暖房期
Theta_star_HBR_d_t[H] = Theta_set_H
# 冷房期
Theta_star_HBR_d_t[C] = Theta_set_C
# 中間期
f1 = np.logical_and(M, np.logical_and(Theta_set_H <= Theta_ex_d_t, Theta_ex_d_t<= Theta_set_C))
Theta_star_HBR_d_t[f1] = Theta_ex_d_t[f1]
f2 = np.logical_and(M, Theta_ex_d_t > Theta_set_C)
Theta_star_HBR_d_t[f2] = Theta_set_C
f3 = np.logical_and(M, Theta_ex_d_t < Theta_set_H)
Theta_star_HBR_d_t[f3] = Theta_set_H
return Theta_star_HBR_d_t
def get_X_star_HBR_d_t(X_ex_d_t, region):
"""(51-1)(51-2)(51-3)
Args:
X_ex_d_t: 日付dの時刻tにおける外気絶対湿度(kg/kg(DA))
region: 地域区分
Returns:
日付dの時刻tにおける負荷バランス時の居室の絶対湿度(kg/kg(DA))
"""
H, C, M = get_season_array_d_t(region)
X_set_C = get_X_set_C()
X_star_HBR_d_t = np.zeros(24 * 365)
# 暖房期
X_star_HBR_d_t[H] = X_ex_d_t[H]
# 冷房期
X_star_HBR_d_t[C] = X_set_C
# 中間期
X_star_HBR_d_t[M] = X_ex_d_t[M]
return X_star_HBR_d_t
# ============================================================================
# 11.4 負荷バランス時の非居室の室温・絶対湿度
# ============================================================================
def get_Theta_star_NR_d_t(Theta_star_HBR_d_t, Q, A_NR, V_vent_l_NR_d_t, V_dash_supply_d_t_i, U_prt, A_prt_i, L_H_d_t_i, L_CS_d_t_i, region):
"""(52-1)(52-2)(52-3)
Args:
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
Q: 当該住戸の熱損失係数(W/(m2・K))
A_NR: 非居室の床面積(m2)
V_vent_l_NR_d_t: 日付dの時刻tにおける非居室の局所換気量(m3/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
U_prt: 間仕切りの熱貫流率(W/(m2・K))
A_prt_i: 暖冷房区画iから見た非居室の間仕切りの面積(m2)
L_H_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの暖房負荷(MJ/h)
L_CS_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの冷房顕熱負荷(MJ/h)
region: 地域区分
Returns:
日付dの時刻tにおける負荷バランス時の非居室の室温(℃)
"""
H, C, M = get_season_array_d_t(region)
c_p_air = get_c_p_air()
rho_air = get_rho_air()
Theta_star_NR_d_t = np.zeros(24 * 365)
# 暖房期 (52-1)
Theta_star_NR_d_t[H] = Theta_star_HBR_d_t[H] - np.sum(L_H_d_t_i[5:12, H], axis=0) / \
((Q - 0.35 * 0.5 * 2.4) * A_NR + c_p_air * rho_air * (V_vent_l_NR_d_t[H] / 3600) + \
np.sum(c_p_air * rho_air * (V_dash_supply_d_t_i[:5, H] / 3600) + U_prt * A_prt_i[:5, np.newaxis], axis=0)) * \
(10 ** 6 / 3600)
# 冷房期 (52-2)
Theta_star_NR_d_t[C] = Theta_star_HBR_d_t[C] + np.sum(L_CS_d_t_i[5:12, C], axis=0) / \
((Q - 0.35 * 0.5 * 2.4) * A_NR + c_p_air * rho_air * (V_vent_l_NR_d_t[C] / 3600) + \
np.sum(c_p_air * rho_air * (V_dash_supply_d_t_i[:5, C] / 3600) + U_prt * A_prt_i[:5, np.newaxis], axis=0)) * \
(10 ** 6 / 3600)
# 中間期 (52-3)
Theta_star_NR_d_t[M] = Theta_star_HBR_d_t[M]
return Theta_star_NR_d_t
def get_X_star_NR_d_t(X_star_HBR_d_t, L_CL_d_t_i, L_wtr, V_vent_l_NR_d_t, V_dash_supply_d_t_i, region):
"""(53-1)(53-2)(53-3)
Args:
X_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の絶対湿度(kg/kg(DA))
L_CL_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの冷房潜熱負荷(MJ/h)
L_wtr: 水の蒸発潜熱(kJ/kg)
V_vent_l_NR_d_t: 日付dの時刻tにおける非居室の局所換気量(m3/h)
V_dash_supply_d_t_i: 日付dの時刻tにおける暖冷房区画iのVAV調整前の吹き出し風量(m3/h)
region: 地域区分
Returns:
日付dの時刻tにおける負荷バランス時の非居室の絶対湿度(kg/kg(DA))
"""
H, C, M = get_season_array_d_t(region)
rho_air = get_rho_air()
X_star_NR_d_t = np.zeros(24 * 365)
# 暖房期 (53-1)
X_star_NR_d_t[H] = X_star_HBR_d_t[H]
# 冷房期 (53-2)
X_star_NR_d_t[C] = X_star_HBR_d_t[C] + (np.sum(L_CL_d_t_i[5:12, C], axis=0) \
/ (L_wtr * rho_air * (V_vent_l_NR_d_t[C] + np.sum(V_dash_supply_d_t_i[:5, C], axis=0)))) * 10 ** 3
# 中間期 (53-3)
X_star_NR_d_t[M] = X_star_HBR_d_t[M]
return X_star_NR_d_t
# ============================================================================
# 12 ダクト
# ============================================================================
# ============================================================================
# 12.1 ダクトの周囲の空気温度
# ============================================================================
def get_Theta_sur_d_t_i(Theta_star_HBR_d_t, Theta_attic_d_t, l_duct_in_i, l_duct_ex_i, duct_insulation):
"""(54-1)(54-2)
Args:
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
l_duct_in_i: 断熱区画内を通るダクトiの長さ(m)
l_duct_ex_i: 断熱区画外を通るダクトiの長さ(m)
Theta_attic_d_t: 小屋裏の空気温度 (℃)
duct_insulation: ダクトが通過する空間
Returns:
日付dの時刻tにおけるダクトiの周囲の空気温度(℃)
"""
Theta_sur_H_d_t_i = np.zeros((5, 24 * 365))
Theta_star_HBR_d_t_i = np.tile(Theta_star_HBR_d_t, (5, 1))
Theta_attic_d_t_i = np.tile(Theta_attic_d_t, (5, 1))
if duct_insulation == '全てもしくは一部が断熱区画外である':
Theta_sur_H_d_t_i = (l_duct_in_i[:, np.newaxis] * Theta_star_HBR_d_t_i + l_duct_ex_i[:, np.newaxis] * Theta_attic_d_t_i) / \
(l_duct_in_i[:, np.newaxis] + l_duct_ex_i[:, np.newaxis])
elif duct_insulation == '全て断熱区画内である':
Theta_sur_H_d_t_i = Theta_star_HBR_d_t_i
else:
raise ValueError(duct_insulation)
return Theta_sur_H_d_t_i
def get_Theta_attic_d_t(Theta_SAT_d_t, Theta_star_HBR_d_t):
"""(55)
Args:
Theta_SAT_d_t: 日付dの時刻tにおける水平面における等価外気温度(℃)
Theta_star_HBR_d_t: 日付dの時刻tにおける負荷バランス時の居室の室温(℃)
Returns:
小屋裏の空気温度 (℃)
"""
# 温度差係数
H = get_H()
return Theta_SAT_d_t * H + Theta_star_HBR_d_t * (1.0 - H)
# 温度差係数 (-)
def get_H():
""" """
return 1.0
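# Note (illustrative): because the temperature-difference coefficient H is 1.0,
# equation (55) reduces to Theta_attic_d_t = Theta_SAT_d_t, i.e. the attic is
# assumed to follow the sol-air temperature directly.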
# ============================================================================
# 12.2 ダクトの長さ
# ============================================================================
def get_l_duct__i(l_duct_in_i, l_duct_ex_i):
"""(56)
Args:
l_duct_in_i: 断熱区画内を通るダクトiの長さ(m)
l_duct_ex_i: 断熱区画外を通るダクトiの長さ(m)
Returns:
ダクトiの長さ(m)
"""
return l_duct_in_i + l_duct_ex_i
def get_l_duct_in_i(A_A):
"""(57)
Args:
A_A: 床面積の合計(m2)
Returns:
断熱区画内を通るダクトiの長さ(m)
"""
# 標準住戸の床面積の合計 [m2]
A_A_R = get_A_A_R()
return l_duct_in_R_i * np.sqrt(A_A / A_A_R)
def get_l_duct_ex_i(A_A):
"""(58)
Args:
A_A: 床面積の合計(m2)
Returns:
断熱区画外を通るダクトiの長さ(m)
"""
# 標準住戸の床面積の合計 [m2]
A_A_R = get_A_A_R()
return l_duct_ex_R_i * np.sqrt(A_A / A_A_R)
# 断熱区画内を通るダクトの長さ [m]
l_duct_in_R_i = np.array([
25.6,
8.6,
0.0,
0.0,
0.0,
])
# 断熱区画外を通るダクトの長さ [m]
l_duct_ex_R_i = np.array([
0.0,
0.0,
10.2,
11.8,
8.1,
])
# ダクトの長さ(合計) [m]
l_duct_R_i = np.array([
25.6,
8.6,
10.2,
11.8,
8.1,
])
# ============================================================================
# 12.3 ダクトの熱損失係数
# ============================================================================
# ダクトiの線熱損失係数 [W/mK]
def get_phi_i():
""" """
return np.array([0.49] * 5)
# ============================================================================
# 13 その他
# ============================================================================
# ============================================================================
# 13.1 外気条件
# ============================================================================
def get_Theta_SAT_d_t(Theta_ex_d_t, J_d_t):
"""(59)
Args:
Theta_ex_d_t: 日付dの時刻tにおける外気温度(℃)
J_d_t: 日付dの時刻tにおける水平面全天日射量(W/m2)
Returns:
日付dの時刻tにおける水平面における等価外気温度(℃)
"""
return Theta_ex_d_t + 0.034 * J_d_t
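# Worked example (arbitrary conditions): with Theta_ex = 30 degC and J = 800 W/m2,
# equation (59) gives a sol-air temperature of 30 + 0.034 * 800 = 57.2 degC.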
# ============================================================================
# 13.2 住宅の仕様
# ============================================================================
# ============================================================================
# 13.2.2 間仕切り
# ============================================================================
def get_A_prt_i(A_HCZ_i, r_env, A_MR, A_NR, A_OR):
"""(60-1)(60-2)
Args:
A_HCZ_i: 暖冷房区画iの床面積(m2)
r_env: 床面積の合計に対する外皮の部位の面積の合計の比(-)
A_MR: 主たる居室の床面積(m2)
A_NR: 非居室の床面積(m2)
A_OR: その他の居室の床面積(m2)
Returns:
居室(i=1~5)に対する暖冷房区画iから見た非居室の間仕切りの面積(m2)
"""
A_XR = np.array([A_OR, A_MR, A_MR, A_MR, A_MR])
return np.array([A_HCZ_i[i] * r_env * (A_NR / (A_XR[i] + A_NR)) for i in range(5)])
def get_U_prt():
"""(61)
:return: 間仕切りの熱貫流率(W/(m2・K))
Args:
Returns:
"""
R_prt = get_R_prt()
return 1 / R_prt
def get_R_prt():
""":return: R_prt:間仕切りの熱抵抗((m2・K)/W)"""
return 0.46
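# Note (illustrative): equation (61) therefore fixes the partition U-value at
# U_prt = 1 / 0.46 ≈ 2.17 W/(m2・K), independent of the dwelling.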
# ============================================================================
# 13.2.4 機械換気量
# ============================================================================
# 暖冷房区画iの全般換気量
def get_V_vent_g_i(A_HCZ_i, A_HCZ_R_i):
"""(62)
Args:
A_HCZ_i: 暖冷房区画iの床面積 (m2)
A_HCZ_R_i: 標準住戸における暖冷房区画iの床面積(m2)
Returns:
ndarray[5]: 暖冷房区画iの全般換気量 (m3/h)
"""
# 標準住戸における暖冷房区画iの全般換気量 [m3/h]
V_vent_g_R_i = get_V_vent_g_R_i()
return V_vent_g_R_i * (np.array(A_HCZ_i[:5]) / np.array(A_HCZ_R_i[:5]))
# 表2 標準住戸における暖冷房区画iの全般換気量 [m3/h]
def get_V_vent_g_R_i():
""" """
return np.array([
60,
20,
40,
20,
20
])
# 局所換気
@lru_cache()
def calc_V_vent_l_d_t():
""" """
V_vent_l_MR_d_t = get_V_vent_l_MR_d_t()
V_vent_l_OR_d_t = get_V_vent_l_OR_d_t()
V_vent_l_NR_d_t = get_V_vent_l_NR_d_t()
return get_V_vent_l_d_t(V_vent_l_MR_d_t, V_vent_l_OR_d_t, V_vent_l_NR_d_t)
# 日付dの時刻tにおける局所換気量
def get_V_vent_l_d_t(V_vent_l_MR_d_t, V_vent_l_OR_d_t, V_vent_l_NR_d_t):
"""(63)
Args:
V_vent_l_MR_d_t: 日付dの時刻tにおける主たる居室の局所換気量(m3/h)
V_vent_l_OR_d_t: 日付dの時刻tにおけるその他の居室の局所換気量(m3/h)
V_vent_l_NR_d_t: 日付dの時刻tにおける非居室の局所換気量(m3/h)
Returns:
日付dの時刻tにおける局所換気量(m3/h)
"""
return V_vent_l_MR_d_t + V_vent_l_OR_d_t + V_vent_l_NR_d_t
# 日付dの時刻tにおける主たる居室の局所換気量(m3/h)
def get_V_vent_l_MR_d_t():
""":return: 日付dの時刻tにおける主たる居室の局所換気量(m3/h)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_3 = get_table_3()
# 全日平日とみなした24時間365日の局所換気量
tmp_a = np.tile(table_3[0], 365)
# 全日休日とみなした24時間365日の局所換気量
tmp_b = np.tile(table_3[1], 365)
# 時間単位に展開した生活パターン
schedule_extend = np.repeat(np.array(schedule_ac), 24)
V_vent_l_MR_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return V_vent_l_MR_d_t
# 日付dの時刻tにおけるその他の居室の局所換気量(m3/h)
def get_V_vent_l_OR_d_t():
""":return: 日付dの時刻tにおけるその他の居室の局所換気量(m3/h)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_3 = get_table_3()
# 全日平日とみなした24時間365日の局所換気量
tmp_a = np.tile(table_3[2], 365)
# 全日休日とみなした24時間365日の局所換気量
tmp_b = np.tile(table_3[3], 365)
# 時間単位に展開した生活パターン
schedule_extend = np.repeat(np.array(schedule_ac), 24)
V_vent_l_OR_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return V_vent_l_OR_d_t
# 日付dの時刻tにおける非居室の局所換気量(m3/h)
def get_V_vent_l_NR_d_t():
""":return: 日付dの時刻tにおける非居室の局所換気量(m3/h)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_3 = get_table_3()
# 全日平日とみなした24時間365日の局所換気量
tmp_a = np.tile(table_3[4], 365)
# 全日休日とみなした24時間365日の局所換気量
tmp_b = np.tile(table_3[5], 365)
# 時間単位に展開した生活パターン
schedule_extend = np.repeat(np.array(schedule_ac), 24)
V_vent_l_NR_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return V_vent_l_NR_d_t
# 局所換気量
def get_table_3():
""" """
return [
(0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 150, 150, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 75, 0, 0, 0, 0, 150, 150, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 6, 2, 0, 0.8, 0, 0, 0.8, 0, 0, 0, 0.8, 0.8, 0.8, 0.8, 0.8, 52, 25, 102.8),
(0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 1.2, 1.2, 0, 0, 0, 0, 2, 75.8, 25, 2, 0.8, 25, 27, 100.8),
]
# ============================================================================
# 13.2.5 内部発熱・発湿(人体を除く)
# ============================================================================
def get_q_gen_d_t(q_gen_MR_d_t, q_gen_OR_d_t, q_gen_NR_d_t):
"""(64a)
Args:
q_gen_MR_d_t: 日付dの時刻tにおける主たる居室の内部発熱(W)
q_gen_OR_d_t: 日付dの時刻tにおけるその他の居室の内部発熱(W)
q_gen_NR_d_t: 日付dの時刻tにおける非居室の内部発熱(W)
Returns:
日付dの時刻tにおける内部発熱(W)
"""
return q_gen_MR_d_t + q_gen_OR_d_t + q_gen_NR_d_t
def calc_q_gen_MR_d_t(A_MR):
"""(64b)
Args:
A_MR: 主たる居室の床面積(m2)
Returns:
"""
q_gen_MR_R_d_t = get_q_gen_MR_R_d_t()
return q_gen_MR_R_d_t * (A_MR / 29.81)
def calc_q_gen_OR_d_t(A_OR):
"""(64c)
Args:
A_OR: その他の居室の床面積(m2)
Returns:
"""
q_gen_OR_R_d_t = get_q_gen_OR_R_d_t()
return q_gen_OR_R_d_t * (A_OR / 51.34)
def calc_q_gen_NR_d_t(A_NR):
"""(64d)
Args:
A_NR: 非居室の床面積(m2)
Returns:
"""
q_gen_NR_R_d_t = get_q_gen_NR_R_d_t()
return q_gen_NR_R_d_t * (A_NR / 38.93)
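# Note (illustrative): in equations (64b)-(64d) the divisors 29.81, 51.34 and 38.93 m2
# appear to be the standard dwelling's main-room, other-room and non-living-room floor
# areas, so the reference internal-gain schedules are scaled linearly by floor area.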
# 日付dの時刻tにおける標準住戸の主たる居室の内部発熱(W)
def get_q_gen_MR_R_d_t():
""":return: 日付dの時刻tにおける標準住戸の主たる居室の内部発熱(W)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_4 = get_table_4()
# 全日平日とみなした24時間365日の標準住戸における内部発熱
tmp_a = np.tile(table_4[0], 365)
# 全日休日とみなした24時間365日の標準住戸における内部発熱
tmp_b = np.tile(table_4[1], 365)
# 時間単位に展開した生活パターン
schedule_extend = np.repeat(np.array(schedule_ac), 24)
q_gen_MR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return q_gen_MR_R_d_t
# 日付dの時刻tにおける標準住戸のその他の居室の内部発熱(W)
def get_q_gen_OR_R_d_t():
""":return: 日日付dの時刻tにおける標準住戸のその他の居室の内部発熱(W)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_4 = get_table_4()
# 全日平日とみなした24時間365日の標準住戸における内部発熱
tmp_a = np.tile(table_4[2], 365)
# 全日休日とみなした24時間365日の標準住戸における内部発熱
tmp_b = np.tile(table_4[3], 365)
# 時間単位に展開した生活パターン
schedule_extend = np.repeat(np.array(schedule_ac), 24)
q_gen_OR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return q_gen_OR_R_d_t
# 日付dの時刻tにおける標準住戸の非居室の内部発熱(W)
def get_q_gen_NR_R_d_t():
""":return: 日付dの時刻tにおける標準住戸の非居室の内部発熱(W)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_4 = get_table_4()
# 全日平日とみなした24時間365日の標準住戸における内部発熱
tmp_a = np.tile(table_4[4], 365)
# 全日休日とみなした24時間365日の標準住戸における内部発熱
tmp_b = np.tile(table_4[5], 365)
# 時間単位に展開した生活パターン
schedule_extend = np.repeat(np.array(schedule_ac), 24)
q_gen_NR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return q_gen_NR_R_d_t
# 標準住戸における内部発熱
def get_table_4():
""" """
return [
(66.9,66.9,66.9,66.9,66.9,66.9,123.9,383.6,323.2,307.3,134.8,66.9,286.7,271.2,66.9,66.9,236.9,288.6,407.8,383.1,423.1,339.1,312.9,278),
(66.9,66.9,66.9,66.9,66.9,66.9,66.9,66.9,440.5,443.3,515.1,488.9,422.9,174.4,66.9,66.9,237.8,407.8,383.1,326.8,339.1,339.1,312.9,66.9),
(18,18,18,18,18,18,18,18,18,398.2,18,18,18,18,18,18,18,18,53,53,115.5,103,258.3,137.3),
(18,18,18,18,18,18,18,18,35.5,654.3,223,223,53,18,18,18,93,93,55.5,18,270,168.8,270,18),
(41.5,41.5,41.5,41.5,41.5,41.5,126.1,249.9,158.3,191.3,117.5,41.5,42.5,89,41.5,41.5,105.8,105.8,112.1,118.5,155.7,416.1,314.8,174.9),
(41.5,41.5,41.5,41.5,41.5,41.5,41.5,281.3,311,269.5,100.4,106.7,98.5,55.8,41.5,41.5,158.4,171.3,82.7,101.4,99.5,255.1,232.1,157.8),
]
# 日付dの時刻tにおける内部発湿
def get_w_gen_d_t(w_gen_MR_d_t, w_gen_OR_d_t, w_gen_NR_d_t):
"""(65a)
Args:
w_gen_MR_d_t: 日付dの時刻tにおける主たる居室の内部発湿(W)
w_gen_OR_d_t: 日付dの時刻tにおけるその他の居室の内部発湿(W)
w_gen_NR_d_t: 日付dの時刻tにおける非居室の内部発湿(W)
Returns:
日付dの時刻tにおける内部発湿(W)
"""
return w_gen_MR_d_t + w_gen_OR_d_t + w_gen_NR_d_t
def calc_w_gen_MR_d_t(A_MR):
"""(65b)
Args:
A_MR: 主たる居室の床面積(m2)
Returns:
"""
w_gen_MR_R_d_t = get_w_gen_MR_R_d_t()
return w_gen_MR_R_d_t * (A_MR / 29.81)
def calc_w_gen_OR_d_t(A_OR):
"""(65c)
Args:
A_OR: その他の居室の床面積(m2)
Returns:
"""
w_gen_OR_R_d_t = get_w_gen_OR_R_d_t()
return w_gen_OR_R_d_t * (A_OR / 51.34)
def calc_w_gen_NR_d_t(A_NR):
"""(65d)
Args:
A_NR: 非居室の床面積(m2)
Returns:
"""
w_gen_NR_R_d_t = get_w_gen_NR_R_d_t()
return w_gen_NR_R_d_t * (A_NR / 38.93)
# Internal moisture generation of the main habitable room of the standard dwelling at date d, time t (W)
def get_w_gen_MR_R_d_t():
""":return: internal moisture generation of the main habitable room of the standard dwelling at date d, time t (W)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_5 = get_table_5()
# Internal moisture generation of the standard dwelling with all 365 days treated as weekdays (hourly)
tmp_a = np.tile(table_5[0], 365)
# Internal moisture generation of the standard dwelling with all 365 days treated as holidays (hourly)
tmp_b = np.tile(table_5[1], 365)
# Living schedule expanded to hourly resolution
schedule_extend = np.repeat(np.array(schedule_ac), 24)
w_gen_MR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return w_gen_MR_R_d_t
# Internal moisture generation of the other habitable rooms of the standard dwelling at date d, time t (W)
def get_w_gen_OR_R_d_t():
""":return: internal moisture generation of the other habitable rooms of the standard dwelling at date d, time t (W)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_5 = get_table_5()
# Internal moisture generation of the standard dwelling with all 365 days treated as weekdays (hourly)
tmp_a = np.tile(table_5[2], 365)
# Internal moisture generation of the standard dwelling with all 365 days treated as holidays (hourly)
tmp_b = np.tile(table_5[3], 365)
# Living schedule expanded to hourly resolution
schedule_extend = np.repeat(np.array(schedule_ac), 24)
w_gen_OR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return w_gen_OR_R_d_t
# Internal moisture generation of the non-habitable rooms of the standard dwelling at date d, time t (W)
def get_w_gen_NR_R_d_t():
""":return: internal moisture generation of the non-habitable rooms of the standard dwelling at date d, time t (W)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_5 = get_table_5()
# Internal moisture generation of the standard dwelling with all 365 days treated as weekdays (hourly)
tmp_a = np.tile(table_5[4], 365)
# Internal moisture generation of the standard dwelling with all 365 days treated as holidays (hourly)
tmp_b = np.tile(table_5[5], 365)
# Living schedule expanded to hourly resolution
schedule_extend = np.repeat(np.array(schedule_ac), 24)
w_gen_NR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return w_gen_NR_R_d_t
# Internal moisture generation in the standard dwelling (Table 5)
def get_table_5():
"""Hourly internal moisture generation of the standard dwelling; rows are MR weekday, MR holiday, OR weekday, OR holiday, NR weekday, NR holiday."""
return [
(0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
]
# ============================================================================
# 13.2.6 Heat generation from occupants and number of occupants
# ============================================================================
# Sensible heat generation per occupant during the heating period (W/person)
def get_q_p_H():
""" """
return 79.0
# Sensible heat generation per occupant during the cooling period (W/person)
def get_q_p_CS():
""" """
return 51.0
# Latent heat generation per occupant during the cooling period (W/person)
def get_q_p_CL():
""" """
return 40.0
# Number of occupants at date d, time t (persons)
def get_n_p_d_t(n_p_MR_d_t, n_p_OR_d_t, n_p_NR_d_t):
"""Number of occupants of the dwelling at date d, time t (Eq. 66a)
Args:
n_p_MR_d_t: number of occupants of the main habitable room at date d, time t (persons)
n_p_OR_d_t: number of occupants of the other habitable rooms at date d, time t (persons)
n_p_NR_d_t: number of occupants of the non-habitable rooms at date d, time t (persons)
Returns:
number of occupants of the dwelling at date d, time t (persons)
"""
return n_p_MR_d_t + n_p_OR_d_t + n_p_NR_d_t
def calc_n_p_MR_d_t(A_MR):
"""(66b)
Args:
A_MR: 主たる居室の床面積(m2)
Returns:
"""
n_p_MR_R_d_t = get_n_p_MR_R_d_t()
return n_p_MR_R_d_t * (A_MR / 29.81)
def calc_n_p_OR_d_t(A_OR):
"""(66c)
Args:
A_OR: その他の居室の床面積(m2)
Returns:
"""
n_p_OR_R_d_t = get_n_p_OR_R_d_t()
return n_p_OR_R_d_t * (A_OR / 51.34)
def calc_n_p_NR_d_t(A_NR):
"""(66d)
Args:
A_NR: 非居室の床面積(m2)
Returns:
"""
n_p_NR_R_d_t = get_n_p_NR_R_d_t()
return n_p_NR_R_d_t * (A_NR / 38.93)
# Number of occupants of the main habitable room of the standard dwelling at date d, time t (persons)
def get_n_p_MR_R_d_t():
""":return: number of occupants of the main habitable room of the standard dwelling at date d, time t (persons)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_6 = get_table_6()
# Number of occupants of the standard dwelling with all 365 days treated as weekdays (hourly)
tmp_a = np.tile(table_6[0], 365)
# Number of occupants of the standard dwelling with all 365 days treated as holidays (hourly)
tmp_b = np.tile(table_6[1], 365)
# Living schedule expanded to hourly resolution
schedule_extend = np.repeat(np.array(schedule_ac), 24)
n_p_MR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return n_p_MR_R_d_t
# Number of occupants of the other habitable rooms of the standard dwelling at date d, time t (persons)
def get_n_p_OR_R_d_t():
""":return: number of occupants of the other habitable rooms of the standard dwelling at date d, time t (persons)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_6 = get_table_6()
# Number of occupants of the standard dwelling with all 365 days treated as weekdays (hourly)
tmp_a = np.tile(table_6[2], 365)
# Number of occupants of the standard dwelling with all 365 days treated as holidays (hourly)
tmp_b = np.tile(table_6[3], 365)
# Living schedule expanded to hourly resolution
schedule_extend = np.repeat(np.array(schedule_ac), 24)
n_p_OR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return n_p_OR_R_d_t
# Number of occupants of the non-habitable rooms of the standard dwelling at date d, time t (persons)
def get_n_p_NR_R_d_t():
""":return: number of occupants of the non-habitable rooms of the standard dwelling at date d, time t (persons)"""
schedule = load_schedule()
schedule_ac = get_schedule_ac(schedule)
table_6 = get_table_6()
# Number of occupants of the standard dwelling with all 365 days treated as weekdays (hourly)
tmp_a = np.tile(table_6[4], 365)
# Number of occupants of the standard dwelling with all 365 days treated as holidays (hourly)
tmp_b = np.tile(table_6[5], 365)
# Living schedule expanded to hourly resolution
schedule_extend = np.repeat(np.array(schedule_ac), 24)
n_p_NR_R_d_t = tmp_a * (schedule_extend == '平日') \
+ tmp_b * (schedule_extend == '休日')
return n_p_NR_R_d_t
# Number of occupants in the standard dwelling (Table 6)
def get_table_6():
"""Hourly number of occupants of the standard dwelling (persons); rows are MR weekday, MR holiday, OR weekday, OR holiday, NR weekday, NR holiday."""
return [
(0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 0, 0, 1, 1, 0, 0, 1, 2, 2, 3, 3, 2, 1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 2, 2, 1, 0, 0, 2, 3, 3, 4, 2, 2, 1, 0),
(4, 4, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 2, 3),
(4, 4, 4, 4, 4, 4, 4, 3, 1, 2, 2, 2, 1, 0, 0, 0, 1, 1, 1, 0, 2, 2, 2, 3),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
]
# ============================================================================
# 13.3 Usage
# ============================================================================
# ============================================================================
# 13.3.1 Heating and cooling periods
# ============================================================================
# Heating and cooling periods
def get_season_array(region):
"""Daily heating (H), cooling (C) and intermediate (M) period masks
Args:
region: region number (1-8)
Returns:
tuple of boolean ndarrays of length 365 (H, C, M)
"""
table_7 = get_table_7()[region-1]
H = get_period_array(table_7[0], table_7[1])
C = get_period_array(table_7[2], table_7[3])
M = np.logical_and(np.logical_not(H), np.logical_not(C))
return H, C, M
# Return the heating and cooling periods as 24 x 365 (hourly) arrays
def get_season_array_d_t(region):
"""Hourly heating (H), cooling (C) and intermediate (M) period masks
Args:
region: region number (1-8)
Returns:
tuple of boolean ndarrays of length 8760 (H, C, M)
"""
H, C, M = get_season_array(region)
H = np.repeat(H, 24)
C = np.repeat(C, 24)
M = np.repeat(M, 24)
return H, C, M
def get_period_array(p1, p2):
"""指定月日期間のみTrueのndarrayを作成する
指定月日期間のみTrueのndarrayを作成する。
開始月日が終了月日が逆転する場合は、年をまたいだとみなす。
Args:
p1: 開始月日をtuple指定 例) 1月2日 であれば (1,2)
p2: 終了月日をtuple指定 例) 5月4日 であれば (5,4)
Returns:
p1からp2の間はTrueである365の長さのndarray
"""
if p1 is None or p2 is None:
return np.zeros(365, dtype=bool)
d_base = datetime.date(2018, 1, 1) #年初
d1 = (datetime.date(2018, p1[0], p1[1]) - d_base).days #年初からの日数1
d2 = (datetime.date(2018, p2[0], p2[1]) - d_base).days #年初からの日数2
if d1 < d2:
# d1からd2の間はTrue
# 例) 7月10日~8月31日
arr = np.zeros(365, dtype=bool)
arr[d1:d2+1] = True
else:
# d1からd2の間はFalse
# 例) 9月24日~6月7日
arr = np.ones(365, dtype=bool)
arr[d2+1:d1] = False
return arr
def get_table_7():
"""Table 7: per-region (heating start, heating end, cooling start, cooling end) month/day tuples."""
return [
((9, 24), (6, 7), (7, 10), (8, 31)),
((9, 26), (6, 4), (7, 15), (8, 31)),
((9, 30), (5, 31), (7, 10), (8, 31)),
((10, 1), (5, 30), (7, 10), (8, 31)),
((10, 10), (5, 15), (7, 6), (8, 31)),
((11, 4), (4, 21), (5, 30), (9, 23)),
((11, 26), (3, 27), (5, 15), (10, 13)),
(None, None, (3, 25), (12, 14)),
]
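# Usage sketch (illustrative): for region 1, Table 7 gives a heating period from
# (9, 24) to (6, 7), which wraps around the year end, and a cooling period from
# (7, 10) to (8, 31).
#
#     H, C, M = get_season_array(1)                  # boolean arrays of length 365
#     H_d_t, C_d_t, M_d_t = get_season_array_d_t(1)  # expanded to length 8760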
# ============================================================================
# 13.3.2 Set temperature and set absolute humidity
# ============================================================================
# Set temperature for heating (℃)
def get_Theta_set_H():
""" """
return 20.0
# Set temperature for cooling (℃)
def get_Theta_set_C():
""" """
return 27.0
# Set absolute humidity for cooling (absolute humidity at an air temperature of 27℃ and 60% relative humidity) (kg/kg(DA))
def get_X_set_C():
""" """
return 0.013425743
# ============================================================================
# 13.4 Heating load and cooling load
# ============================================================================
# ============================================================================
# 13.5 Physical properties of air and water
# ============================================================================
# Specific heat of air (J/(kg·K))
def get_c_p_air():
""" """
return 1006.0
# Density of air (kg/m3)
def get_rho_air():
""" """
return 1.2
# Latent heat of vaporization of water (kJ/kg) (Eq. 67)
def get_L_wtr():
""" """
Theta = get_Theta()
return 2500.8 - 2.3668 * Theta
# Temperature assumed for the cooling condition (℃)
def get_Theta():
""" """
return 27
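# Worked check (added for clarity): with Theta = 27 ℃ assumed for cooling,
# get_L_wtr() evaluates to 2500.8 - 2.3668 * 27 ≈ 2436.9 kJ/kg.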
# ============================================================================
# tests/technologies/test_diffusion_technologies.py (nismod/energy_demand, MIT)
# ============================================================================
"""
"""
from energy_demand.technologies import diffusion_technologies
def test_linear_diff():
"""Testing function
"""
expected1 = 1
expected2 = 1.5
expected3 = 10
expected4 = 0
# call function
out_value_1 = diffusion_technologies.linear_diff(
base_yr=2015,
curr_yr=2020,
value_start=0.0,
value_end=1.0,
yr_until_changed=2020)
assert out_value_1 == expected1
out_value_2 = diffusion_technologies.linear_diff(
base_yr=2015,
curr_yr=2016.5,
value_start=1.0,
value_end=2.0,
yr_until_changed=2018)
assert out_value_2 == expected2
out_value_3 = diffusion_technologies.linear_diff(
base_yr=2000,
curr_yr=2100,
value_start=0,
value_end=100.0,
yr_until_changed=3000)
assert out_value_3 == expected3
out_value_4 = diffusion_technologies.linear_diff(
base_yr=2015,
curr_yr=2015,
value_start=0,
value_end=1,
yr_until_changed=2015)
assert out_value_4 == expected4
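# The expected values above imply straight-line interpolation between value_start
# and value_end over the years from base_yr to yr_until_changed. A minimal sketch
# of that behaviour (an illustration only, not the energy_demand implementation):
def _linear_diff_sketch(base_yr, curr_yr, value_start, value_end, yr_until_changed):
    """Linearly interpolate between value_start and value_end (illustrative sketch)."""
    span = yr_until_changed - base_yr
    if span == 0:
        # Degenerate case used by the last assertion above: no diffusion period.
        return value_start
    return value_start + (value_end - value_start) * (curr_yr - base_yr) / span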
def test_sigmoid_diffusion():
"""testing
"""
base_yr = 2015
curr_yr = 2015
end_yr = 2020
sig_midpoint = 0
sig_steepness = 1
result = diffusion_technologies.sigmoid_diffusion(
base_yr,
curr_yr,
end_yr,
sig_midpoint,
sig_steepness)
assert result == 0
base_yr = 2015
curr_yr = 2020
end_yr = 2020
sig_midpoint = 0
sig_steepness = 1
result = diffusion_technologies.sigmoid_diffusion(
base_yr,
curr_yr,
end_yr,
sig_midpoint,
sig_steepness)
assert result == 1
# ---
base_yr = 2015
curr_yr = 2015
end_yr = 2020
sig_midpoint = 0
sig_steepness = 1
result = diffusion_technologies.sigmoid_diffusion(
base_yr,
curr_yr,
end_yr,
sig_midpoint,
sig_steepness)
assert result == 0
# ---
base_yr = 2015
curr_yr = 2020
end_yr = 2025
sig_midpoint = 0
sig_steepness = 1
result = diffusion_technologies.sigmoid_diffusion(
base_yr,
curr_yr,
end_yr,
sig_midpoint,
sig_steepness)
assert result == 0.5
# ============================================================================
# picker/host_key.py (laashub-sua/demo-pywinauto, Apache-2.0)
# ============================================================================
from pynput import keyboard
def on_activate_h():
print('<ctrl>+<alt>+h pressed')
def on_activate_i():
print('<ctrl>+<alt>+i pressed')
with keyboard.GlobalHotKeys({
'<ctrl>+<alt>+h': on_activate_h,
'<ctrl>+<alt>+i': on_activate_i}) as h:
h.join()
# ============================================================================
# doc/integrations/pytorch/parlai/crowdsourcing/tasks/model_chat/run.py (novium258/cortx-1, Apache-2.0)
# ============================================================================
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from dataclasses import dataclass, field
from typing import Any, List
import hydra
from mephisto.operations.hydra_config import register_script_config
from omegaconf import DictConfig
from parlai.crowdsourcing.tasks.model_chat.model_chat_blueprint import BLUEPRINT_TYPE
from parlai.crowdsourcing.tasks.model_chat.impl import run_task
from parlai.crowdsourcing.utils.mturk import MTurkRunScriptConfig
TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
defaults = [
{'mephisto/blueprint': BLUEPRINT_TYPE},
{"mephisto/architect": "local"},
{"mephisto/provider": "mock"},
{"conf": "example"},
]
@dataclass
class ScriptConfig(MTurkRunScriptConfig):
defaults: List[Any] = field(default_factory=lambda: defaults)
task_dir: str = TASK_DIRECTORY
monitoring_log_rate: int = field(
default=30,
metadata={
'help': 'Frequency in seconds of logging the monitoring of the crowdsourcing task'
},
)
register_script_config(name='scriptconfig', module=ScriptConfig)
@hydra.main(config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
run_task(cfg=cfg, task_directory=TASK_DIRECTORY)
if __name__ == "__main__":
main()
# ============================================================================
# lab08/tests/q0.py (ucsb-ds/ds1-f20-content, MIT)
# ============================================================================
test = {'name': 'q0', 'points': 1, 'suites': [{'cases': [{'code': '>>> float(distance)\n13.0', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
# ============================================================================
# conll_extract/jackknifing/jackknife_conll2012.py (utahnlp/structured_tuning_for_srl, Apache-2.0)
# ============================================================================
# Random shuffle by sentences instead of samples (predicates).
import math
import os
import random
import sys
from os.path import join
def get_sent_to_samples(input_file, domain_file):
num_samples = 0
sent2samples = []
domain2sents = []
domain_names = []
fin = open(input_file, 'r')
fin_domains = open(domain_file, 'r')
prev_words = ''
prev_predicate = -1
for line in fin:
line = line.strip()
line_left = line.split('|||')[0]
pred_id = int(line_left.split()[0])
words = ' '.join(line_left.split()[1:])
dname = fin_domains.readline().strip()
if not (len(domain_names) > 0 and domain_names[-1] == dname):
domain2sents.append([])
domain_names.append(dname)
if not (words == prev_words and pred_id > prev_predicate):
sent2samples.append([])
assert (len(domain_names) > 0 and domain_names[-1] == dname)
num_sents = len(sent2samples) - 1
domain2sents[-1].append(num_sents)
prev_predicate = pred_id
prev_words = words
sent2samples[-1].append(num_samples)
num_samples += 1
fin.close()
fin_domains.close()
return (sent2samples, domain2sents, domain_names)
def get_sample_to_folds(sent2samples, sent_range, num_folds, dev_sents_pct):
'''Assign each sample to training, dev, and prediction folds based on sentence-level splits.
'''
num_sents = sent_range[1] - sent_range[0]
num_sents_per_fold = int(math.ceil(1.0 * num_sents / num_folds))
num_samples = sum([len(s) for s in sent2samples[sent_range[0]:sent_range[1]]])
print "Has %d training samples and %d sentences. Splitting to %d folds with %d sentences each."\
% (num_samples, num_sents, num_folds, num_sents_per_fold)
num_dev_sents = int(math.ceil(dev_sents_pct * num_sents_per_fold))
print "Num. dev sentences: %d." % num_dev_sents
strn = [set() for i in range(num_samples)]
sdev = [set() for i in range(num_samples)]
# prd: the entire heldout set.
sprd = [set() for i in range(num_samples)]
num_trn_samples = [0 for i in range(num_folds)]
num_dev_samples = [0 for i in range(num_folds)]
num_prd_samples = [0 for i in range(num_folds)]
train_sents = range(sent_range[0], sent_range[1])
#random.shuffle(train_sents)
s0 = sent2samples[sent_range[0]][0]
print('Sample id starting at %d' % s0)
for fid in range(num_folds):
ll = fid * num_sents_per_fold
rr = min(num_sents, ll + num_sents_per_fold)
#print fid, ll, rr, rr - ll
for sent_id in train_sents[ll:rr]:
for sample_id in sent2samples[sent_id]:
# Assign training folds to sample.
for fid2 in range(num_folds):
if fid2 != fid:
strn[sample_id - s0].add(fid2)
num_trn_samples[fid2] += 1
# Assign pred folds to sample.
sprd[sample_id - s0].add(fid)
num_prd_samples[fid] += 1
prd_sents = train_sents[ll:rr]
for sent_id in prd_sents[:min(len(prd_sents),num_dev_sents)]:
# Assign dev folds to sample.
for sample_id in sent2samples[sent_id]:
sdev[sample_id - s0].add(fid)
num_dev_samples[fid] += 1
#print strn[:10]
#print sdev[:10]
#print sprd[:10]
print "Num trn samples:", num_trn_samples
print "Num prd samples:", num_prd_samples
print "Num dev samples:", num_dev_samples
return (strn, sdev, sprd)
def split_file(input_file, output_files, sample2fold):
fin = open(input_file, 'r')
fout = [open(fn, 'w') for fn in output_files]
sample_id = 0
for line in fin:
for fid in sample2fold[sample_id]:
fout[fid].write(line.strip() + "\n")
sample_id += 1
fin.close()
for fo in fout:
fo.close()
if __name__ == '__main__':
RANDOM_SEED = 12345
NUM_FOLDS = 5
DEV_SENTS_PCT = 0.3
input_file = sys.argv[1]
domain_file = sys.argv[2]
output_dir = sys.argv[3]
sent2samples, domain2sents, domain_names = get_sent_to_samples(input_file, domain_file)
print('Total samples: ', sum([len(s) for s in sent2samples]))
sample2fold_trn = []
sample2fold_dev = []
sample2fold_prd = []
random.seed(RANDOM_SEED)
for dname, in_domain_sents in zip(domain_names, domain2sents):
sent_range = [in_domain_sents[0], in_domain_sents[-1] + 1]
print(dname, sent_range)
strn, sdev, sprd = get_sample_to_folds(sent2samples, sent_range, NUM_FOLDS, DEV_SENTS_PCT)
sample2fold_trn.extend(strn)
sample2fold_dev.extend(sdev)
sample2fold_prd.extend(sprd)
print(len(sample2fold_trn), len(sample2fold_dev), len(sample2fold_prd))
# Output ids
fout_trn_ids = [open(join(output_dir, 'train.f%02d.ids'%fid), 'w') for fid in range(NUM_FOLDS)]
fout_dev_ids = [open(join(output_dir, 'devel.f%02d.ids'%fid), 'w') for fid in range(NUM_FOLDS)]
fout_prd_ids = [open(join(output_dir, 'pred.f%02d.ids'%fid), 'w') for fid in range(NUM_FOLDS)]
num_samples = len(sample2fold_trn)
for sid in range(num_samples):
for fid in sample2fold_trn[sid]:
fout_trn_ids[fid].write("%d\n" % sid)
for fid in sample2fold_dev[sid]:
fout_dev_ids[fid].write("%d\n" % sid)
for fid in sample2fold_prd[sid]:
fout_prd_ids[fid].write("%d\n" % sid)
for fo in fout_trn_ids + fout_dev_ids + fout_prd_ids:
fo.close()
# Generate output files.
filename = input_file.split('/')[-1].split('.')[0]
print(filename)
split_file(input_file,
[join(output_dir, '%s.train.f%02d.txt'%(filename,fid)) for fid in range(NUM_FOLDS)],
sample2fold_trn)
split_file(input_file,
[join(output_dir, '%s.devel.f%02d.txt'%(filename,fid)) for fid in range(NUM_FOLDS)],
sample2fold_dev)
split_file(input_file,
[join(output_dir, '%s.pred.f%02d.txt'%(filename,fid)) for fid in range(NUM_FOLDS)],
sample2fold_prd)
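# Usage (inferred from the sys.argv handling above):
#   python jackknife_conll2012.py <input_file> <domain_file> <output_dir>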
# ============================================================================
# src/metpy/io/nexrad.py (jibbals/MetPy, BSD-3-Clause)
# ============================================================================
# Copyright (c) 2009,2015,2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Support reading information from various NEXRAD formats."""
import bz2
from collections import defaultdict, namedtuple, OrderedDict
import contextlib
import datetime
import logging
import pathlib
import re
import struct
from xdrlib import Unpacker
import numpy as np
from scipy.constants import day, milli
from ._tools import (Array, BitField, Bits, DictStruct, Enum, IOBuffer, NamedStruct,
open_as_needed, zlib_decompress_all_frames)
from ..package_tools import Exporter
exporter = Exporter(globals())
log = logging.getLogger(__name__)
def version(val):
"""Calculate a string version from an integer value."""
if val / 100. > 2.:
ver = val / 100.
else:
ver = val / 10.
return f'{ver:.1f}'
def scaler(scale):
"""Create a function that scales by a specific value."""
def inner(val):
return val * scale
return inner
def angle(val):
"""Convert an integer value to a floating point angle."""
return val * 360. / 2**16
def az_rate(val):
"""Convert an integer value to a floating point angular rate."""
return val * 90. / 2**16
def bzip_blocks_decompress_all(data):
"""Decompress all of the bzip2-ed blocks.
Returns the decompressed data as a `bytearray`.
"""
frames = bytearray()
offset = 0
while offset < len(data):
block_cmp_bytes = abs(int.from_bytes(data[offset:offset + 4], 'big', signed=True))
offset += 4
try:
frames += bz2.decompress(data[offset:offset + block_cmp_bytes])
offset += block_cmp_bytes
except OSError:
# If we've decompressed any frames, this is an error mid-stream, so warn, stop
# trying to decompress and let processing proceed
if frames:
logging.warning('Error decompressing bz2 block stream at offset: %d',
offset - 4)
break
else: # Otherwise, this isn't a bzip2 stream, so bail
raise ValueError('Not a bz2 stream.')
return frames
def nexrad_to_datetime(julian_date, ms_midnight):
"""Convert NEXRAD date time format to python `datetime.datetime`."""
# Subtracting one from julian_date is because epoch date is 1
return datetime.datetime.utcfromtimestamp((julian_date - 1) * day + ms_midnight * milli)
def remap_status(val):
"""Convert status integer value to appropriate bitmask."""
status = 0
bad = BAD_DATA if val & 0xF0 else 0
val &= 0x0F
if val == 0:
status = START_ELEVATION
elif val == 1:
status = 0
elif val == 2:
status = END_ELEVATION
elif val == 3:
status = START_ELEVATION | START_VOLUME
elif val == 4:
status = END_ELEVATION | END_VOLUME
elif val == 5:
status = START_ELEVATION | LAST_ELEVATION
return status | bad
START_ELEVATION = 0x1
END_ELEVATION = 0x2
START_VOLUME = 0x4
END_VOLUME = 0x8
LAST_ELEVATION = 0x10
BAD_DATA = 0x20
@exporter.export
class Level2File:
r"""Handle reading the NEXRAD Level 2 data and its various messages.
This class attempts to decode every byte that is in a given data file.
It supports both external compression, as well as the internal BZ2
compression that is used.
Attributes
----------
stid : str
The ID of the radar station
dt : Datetime instance
The date and time of the data
vol_hdr : namedtuple
The unpacked volume header
sweeps : list of tuples
Data for each of the sweeps found in the file
rda_status : namedtuple, optional
Unpacked RDA status information, if found
maintenance_data : namedtuple, optional
Unpacked maintenance data information, if found
maintenance_data_desc : dict, optional
Descriptions of maintenance data fields, if maintenance data present
vcp_info : namedtuple, optional
Unpacked VCP information, if found
clutter_filter_bypass_map : dict, optional
Unpacked clutter filter bypass map, if present
rda : dict, optional
Unpacked RDA adaptation data, if present
rda_adaptation_desc : dict, optional
Descriptions of RDA adaptation data, if adaptation data present
Notes
-----
The internal data structure that things are decoded into is still to be
determined.
"""
# Number of bytes
AR2_BLOCKSIZE = 2432 # 12 (CTM) + 2416 (Msg hdr + data) + 4 (FCS)
CTM_HEADER_SIZE = 12
MISSING = float('nan')
RANGE_FOLD = float('nan') # TODO: Need to separate from missing
def __init__(self, filename, *, has_volume_header=True):
r"""Create instance of `Level2File`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. Gzip-ed files are
recognized with the extension '.gz', as are bzip2-ed files with
the extension `.bz2` If `filename` is a file-like object,
this will be read from directly.
"""
fobj = open_as_needed(filename)
with contextlib.closing(fobj):
self._buffer = IOBuffer.fromfile(fobj)
# Try to read the volume header. If this fails, or we're told we don't have one
# then we fall back and try to just read messages, assuming we have e.g. one of
# the real-time chunks.
try:
if has_volume_header:
self._read_volume_header()
except (OSError, ValueError):
log.warning('Unable to read volume header. Attempting to read messages.')
self._buffer.reset()
# See if we need to apply bz2 decompression
start = self._buffer.set_mark()
try:
self._buffer = IOBuffer(self._buffer.read_func(bzip_blocks_decompress_all))
except ValueError:
self._buffer.jump_to(start)
# Now we're all initialized, we can proceed with reading in data
self._read_data()
vol_hdr_fmt = NamedStruct([('version', '9s'), ('vol_num', '3s'),
('date', 'L'), ('time_ms', 'L'), ('stid', '4s')], '>', 'VolHdr')
def _read_volume_header(self):
self.vol_hdr = self._buffer.read_struct(self.vol_hdr_fmt)
self.dt = nexrad_to_datetime(self.vol_hdr.date, self.vol_hdr.time_ms)
self.stid = self.vol_hdr.stid
msg_hdr_fmt = NamedStruct([('size_hw', 'H'),
('rda_channel', 'B', BitField('Redundant Channel 1',
'Redundant Channel 2',
None, 'ORDA')),
('msg_type', 'B'), ('seq_num', 'H'), ('date', 'H'),
('time_ms', 'I'), ('num_segments', 'H'), ('segment_num', 'H')],
'>', 'MsgHdr')
def _read_data(self):
self._msg_buf = {}
self.sweeps = []
self.rda_status = []
while not self._buffer.at_end():
# Clear old file book marks and set the start of message for
# easy jumping to the end
self._buffer.clear_marks()
msg_start = self._buffer.set_mark()
# Skip CTM
self._buffer.skip(self.CTM_HEADER_SIZE)
# Read the message header
msg_hdr = self._buffer.read_struct(self.msg_hdr_fmt)
log.debug('Got message: %s (at offset %d)', str(msg_hdr), self._buffer._offset)
# The AR2_BLOCKSIZE accounts for the CTM header before the
# data, as well as the Frame Check Sequence (4 bytes) after
# the end of the data.
msg_bytes = self.AR2_BLOCKSIZE
# If the size is 0, this is just padding, which is for certain
# done in the metadata messages. Let the default block size handle rather
# than any specific heuristic to skip.
if msg_hdr.size_hw:
# For new packets, the message size isn't on the fixed size boundaries,
# so we use header to figure out. For these, we need to include the
# CTM header but not FCS, in addition to the size.
# As of 2620002P, this is a special value used to indicate that the segment
# number/count bytes are used to indicate total size in bytes.
if msg_hdr.size_hw == 65535:
msg_bytes = (msg_hdr.num_segments << 16 | msg_hdr.segment_num
+ self.CTM_HEADER_SIZE)
elif msg_hdr.msg_type in (29, 31):
msg_bytes = self.CTM_HEADER_SIZE + 2 * msg_hdr.size_hw
log.debug('Total message size: %d', msg_bytes)
# Try to handle the message. If we don't handle it, skipping
# past it is handled at the end anyway.
decoder = f'_decode_msg{msg_hdr.msg_type:d}'
if hasattr(self, decoder):
getattr(self, decoder)(msg_hdr)
else:
log.warning('Unknown message: %d', msg_hdr.msg_type)
# Jump to the start of the next message. This depends on whether
# the message was legacy with fixed block size or not.
self._buffer.jump_to(msg_start, msg_bytes)
# Check if we have any message segments still in the buffer
if self._msg_buf:
log.warning('Remaining buffered message segments for message type(s): %s',
' '.join(f'{typ} ({len(rem)})' for typ, rem in self._msg_buf.items()))
del self._msg_buf
msg1_fmt = NamedStruct([('time_ms', 'L'), ('date', 'H'),
('unamb_range', 'H', scaler(0.1)), ('az_angle', 'H', angle),
('az_num', 'H'), ('rad_status', 'H', remap_status),
('el_angle', 'H', angle), ('el_num', 'H'),
('surv_first_gate', 'h', scaler(0.001)),
('doppler_first_gate', 'h', scaler(0.001)),
('surv_gate_width', 'H', scaler(0.001)),
('doppler_gate_width', 'H', scaler(0.001)),
('surv_num_gates', 'H'), ('doppler_num_gates', 'H'),
('cut_sector_num', 'H'), ('calib_dbz0', 'f'),
('ref_offset', 'H'), ('vel_offset', 'H'), ('sw_offset', 'H'),
('dop_res', 'H', BitField(None, 0.5, 1.0)), ('vcp', 'H'),
(None, '14x'), ('nyq_vel', 'H', scaler(0.01)),
('atmos_atten', 'H', scaler(0.001)), ('tover', 'H', scaler(0.1)),
('spot_blanking', 'B', BitField('Radial', 'Elevation', 'Volume')),
(None, '32x')], '>', 'Msg1Fmt')
msg1_data_hdr = namedtuple('Msg1DataHdr',
'name first_gate gate_width num_gates scale offset')
def _decode_msg1(self, msg_hdr):
msg_start = self._buffer.set_mark()
hdr = self._buffer.read_struct(self.msg1_fmt)
data_dict = {}
# Process all data pointers:
read_info = []
if hdr.surv_num_gates and hdr.ref_offset:
read_info.append((hdr.ref_offset,
self.msg1_data_hdr('REF', hdr.surv_first_gate,
hdr.surv_gate_width,
hdr.surv_num_gates, 2.0, 66.0)))
if hdr.doppler_num_gates and hdr.vel_offset:
read_info.append((hdr.vel_offset,
self.msg1_data_hdr('VEL', hdr.doppler_first_gate,
hdr.doppler_gate_width,
hdr.doppler_num_gates,
1. / hdr.dop_res, 129.0)))
if hdr.doppler_num_gates and hdr.sw_offset:
read_info.append((hdr.sw_offset,
self.msg1_data_hdr('SW', hdr.doppler_first_gate,
hdr.doppler_gate_width,
hdr.doppler_num_gates, 2.0, 129.0)))
for ptr, data_hdr in read_info:
# Jump and read
self._buffer.jump_to(msg_start, ptr)
vals = self._buffer.read_array(data_hdr.num_gates, 'B')
# Scale and flag data
scaled_vals = (vals - data_hdr.offset) / data_hdr.scale
scaled_vals[vals == 0] = self.MISSING
scaled_vals[vals == 1] = self.RANGE_FOLD
# Store
data_dict[data_hdr.name] = (data_hdr, scaled_vals)
self._add_sweep(hdr)
self.sweeps[-1].append((hdr, data_dict))
msg2_fmt = NamedStruct([
('rda_status', 'H', BitField('None', 'Start-Up', 'Standby', 'Restart',
'Operate', 'Spare', 'Off-line Operate')),
('op_status', 'H', BitField('Disabled', 'On-Line',
'Maintenance Action Required',
'Maintenance Action Mandatory',
'Commanded Shut Down', 'Inoperable',
'Automatic Calibration')),
('control_status', 'H', BitField('None', 'Local Only',
'RPG (Remote) Only', 'Either')),
('aux_power_gen_state', 'H', BitField('Switch to Aux Power',
'Utility PWR Available',
'Generator On',
'Transfer Switch Manual',
'Commanded Switchover')),
('avg_tx_pwr', 'H'), ('ref_calib_cor', 'h', scaler(0.01)),
('data_transmission_enabled', 'H', BitField('None', 'None',
'Reflectivity', 'Velocity', 'Width')),
('vcp_num', 'h'), ('rda_control_auth', 'H', BitField('No Action',
'Local Control Requested',
'Remote Control Enabled')),
('rda_build', 'H', version), ('op_mode', 'H', BitField('None', 'Test',
'Operational', 'Maintenance')),
('super_res_status', 'H', BitField('None', 'Enabled', 'Disabled')),
('cmd_status', 'H', Bits(6)),
('avset_status', 'H', BitField('None', 'Enabled', 'Disabled')),
('rda_alarm_status', 'H', BitField('No Alarms', 'Tower/Utilities',
'Pedestal', 'Transmitter', 'Receiver',
'RDA Control', 'Communication',
'Signal Processor')),
('command_acknowledge', 'H', BitField('Remote VCP Received',
'Clutter Bypass map received',
'Redundant Chan Ctrl Cmd received')),
('channel_control_status', 'H'),
('spot_blanking', 'H', BitField('Enabled', 'Disabled')),
('bypass_map_gen_date', 'H'), ('bypass_map_gen_time', 'H'),
('clutter_filter_map_gen_date', 'H'), ('clutter_filter_map_gen_time', 'H'),
('refv_calib_cor', 'h', scaler(0.01)),
('transition_pwr_src_state', 'H', BitField('Off', 'OK')),
('RMS_control_status', 'H', BitField('RMS in control', 'RDA in control')),
# See Table IV-A for definition of alarms
(None, '2x'), ('alarms', '28s', Array('>14H'))], '>', 'Msg2Fmt')
msg2_additional_fmt = NamedStruct([
('sig_proc_options', 'H', BitField('CMD RhoHV Test')),
(None, '36x'), ('status_version', 'H')], '>', 'Msg2AdditionalFmt')
def _decode_msg2(self, msg_hdr):
msg_start = self._buffer.set_mark()
self.rda_status.append(self._buffer.read_struct(self.msg2_fmt))
remaining = (msg_hdr.size_hw * 2 - self.msg_hdr_fmt.size
- self._buffer.offset_from(msg_start))
# RDA Build 18.0 expanded the size
if remaining >= self.msg2_additional_fmt.size:
self.rda_status.append(self._buffer.read_struct(self.msg2_additional_fmt))
remaining -= self.msg2_additional_fmt.size
if remaining:
log.info('Padding detected in message 2. Length encoded as %d but offset when '
'done is %d', 2 * msg_hdr.size_hw, self._buffer.offset_from(msg_start))
def _decode_msg3(self, msg_hdr):
from ._nexrad_msgs.msg3 import descriptions, fields
self.maintenance_data_desc = descriptions
msg_fmt = DictStruct(fields, '>')
self.maintenance_data = self._buffer.read_struct(msg_fmt)
self._check_size(msg_hdr, msg_fmt.size)
vcp_fmt = NamedStruct([('size_hw', 'H'), ('pattern_type', 'H'),
('num', 'H'), ('num_el_cuts', 'H'),
('vcp_version', 'B'), ('clutter_map_group', 'B'),
('dop_res', 'B', BitField(None, 0.5, 1.0)),
('pulse_width', 'B', BitField('None', 'Short', 'Long')),
(None, '4x'), ('vcp_sequencing', 'H'),
('vcp_supplemental_info', 'H'), (None, '2x'),
('els', None)], '>', 'VCPFmt')
vcp_el_fmt = NamedStruct([('el_angle', 'H', angle),
('channel_config', 'B', Enum('Constant Phase', 'Random Phase',
'SZ2 Phase')),
('waveform', 'B', Enum('None', 'Contiguous Surveillance',
'Contig. Doppler with Ambiguity Res.',
'Contig. Doppler without Ambiguity Res.',
'Batch', 'Staggered Pulse Pair')),
('super_res', 'B', BitField('0.5 azimuth and 0.25km range res.',
'Doppler to 300km',
'Dual Polarization Control',
'Dual Polarization to 300km')),
('surv_prf_num', 'B'), ('surv_pulse_count', 'H'),
('az_rate', 'h', az_rate),
('ref_thresh', 'h', scaler(0.125)),
('vel_thresh', 'h', scaler(0.125)),
('sw_thresh', 'h', scaler(0.125)),
('zdr_thresh', 'h', scaler(0.125)),
('phidp_thresh', 'h', scaler(0.125)),
('rhohv_thresh', 'h', scaler(0.125)),
('sector1_edge', 'H', angle),
('sector1_doppler_prf_num', 'H'),
('sector1_pulse_count', 'H'), ('supplemental_data', 'H'),
('sector2_edge', 'H', angle),
('sector2_doppler_prf_num', 'H'),
('sector2_pulse_count', 'H'), ('ebc_angle', 'H', angle),
('sector3_edge', 'H', angle),
('sector3_doppler_prf_num', 'H'),
('sector3_pulse_count', 'H'), (None, '2x')], '>', 'VCPEl')
def _decode_msg5(self, msg_hdr):
vcp_info = self._buffer.read_struct(self.vcp_fmt)
els = [self._buffer.read_struct(self.vcp_el_fmt) for _ in range(vcp_info.num_el_cuts)]
self.vcp_info = vcp_info._replace(els=els)
self._check_size(msg_hdr,
self.vcp_fmt.size + vcp_info.num_el_cuts * self.vcp_el_fmt.size)
def _decode_msg13(self, msg_hdr):
data = self._buffer_segment(msg_hdr)
if data:
data = struct.Struct(f'>{len(data) // 2:d}h').unpack(data)
# Legacy format doesn't have date/time and has fewer azimuths
if data[0] <= 5:
num_el = data[0]
dt = None
num_az = 256
offset = 1
else:
date, time, num_el = data[:3]
# time is in "minutes since midnight", need to pass as ms since midnight
dt = nexrad_to_datetime(date, 60 * 1000 * time)
num_az = 360
offset = 3
self.clutter_filter_bypass_map = {'datetime': dt, 'data': []}
chunk_size = 32
bit_conv = Bits(16)
for e in range(num_el):
seg_num = data[offset]
if seg_num != (e + 1):
log.warning('Message 13 segments out of sync -- read %d but on %d',
seg_num, e + 1)
az_data = []
for _ in range(num_az):
gates = []
for i in range(1, chunk_size + 1):
gates.extend(bit_conv(data[offset + i]))
az_data.append(gates)
self.clutter_filter_bypass_map['data'].append(az_data)
offset += num_az * chunk_size + 1
if offset != len(data):
log.warning('Message 13 left data -- Used: %d Avail: %d', offset, len(data))
msg15_code_map = {0: 'Bypass Filter', 1: 'Bypass map in Control',
2: 'Force Filter'}
def _decode_msg15(self, msg_hdr):
# buffer the segments until we have the whole thing. The data
# will be returned concatenated when this is the case
data = self._buffer_segment(msg_hdr)
if data:
date, time, num_el, *data = struct.Struct(f'>{len(data) // 2:d}h').unpack(data)
if num_el == 0:
log.info('Message 15 num_el is 0--likely legacy clutter filter notch width. '
'Skipping...')
return
# time is in "minutes since midnight", need to pass as ms since midnight
self.clutter_filter_map = {'datetime': nexrad_to_datetime(date, 60 * 1000 * time),
'data': []}
offset = 0
for _ in range(num_el):
az_data = []
for _ in range(360):
num_rng = data[offset]
codes = data[offset + 1:offset + 1 + 2 * num_rng:2]
ends = data[offset + 2:offset + 2 + 2 * num_rng:2]
az_data.append(list(zip(ends, codes)))
offset += 2 * num_rng + 1
self.clutter_filter_map['data'].append(az_data)
if offset != len(data):
log.warning('Message 15 left data -- Used: %d Avail: %d', offset, len(data))
def _decode_msg18(self, msg_hdr):
# buffer the segments until we have the whole thing. The data
# will be returned concatenated when this is the case
data = self._buffer_segment(msg_hdr)
# Legacy versions don't even document this:
if data and self.vol_hdr.version[:8] not in (b'ARCHIVE2', b'AR2V0001'):
from ._nexrad_msgs.msg18 import descriptions, fields
self.rda_adaptation_desc = descriptions
# Can't use NamedStruct because we have more than 255 items--this
# is a CPython limit for arguments.
msg_fmt = DictStruct(fields, '>')
# Be extra paranoid about passing too much data in case of legacy files
self.rda = msg_fmt.unpack(data[:msg_fmt.size])
for num in (11, 21, 31, 32, 300, 301):
attr = f'VCPAT{num}'
dat = self.rda[attr]
vcp_hdr = self.vcp_fmt.unpack_from(dat, 0)
off = self.vcp_fmt.size
els = []
for _ in range(vcp_hdr.num_el_cuts):
els.append(self.vcp_el_fmt.unpack_from(dat, off))
off += self.vcp_el_fmt.size
self.rda[attr] = vcp_hdr._replace(els=els)
msg31_data_hdr_fmt = NamedStruct([('stid', '4s'), ('time_ms', 'L'),
('date', 'H'), ('az_num', 'H'),
('az_angle', 'f'), ('compression', 'B'),
(None, 'x'), ('rad_length', 'H'),
('az_spacing', 'B', Enum(0, 0.5, 1.0)),
('rad_status', 'B', remap_status),
('el_num', 'B'), ('sector_num', 'B'),
('el_angle', 'f'),
('spot_blanking', 'B', BitField('Radial', 'Elevation',
'Volume')),
('az_index_mode', 'B', scaler(0.01)),
('num_data_blks', 'H')], '>', 'Msg31DataHdr')
msg31_vol_const_fmt = NamedStruct([('type', 's'), ('name', '3s'),
('size', 'H'), ('major', 'B'),
('minor', 'B'), ('lat', 'f'), ('lon', 'f'),
('site_amsl', 'h'), ('feedhorn_agl', 'H'),
('calib_dbz', 'f'), ('txpower_h', 'f'),
('txpower_v', 'f'), ('sys_zdr', 'f'),
('phidp0', 'f'), ('vcp', 'H'),
('processing_status', 'H', BitField('RxR Noise',
'CBT'))],
'>', 'VolConsts')
msg31_el_const_fmt = NamedStruct([('type', 's'), ('name', '3s'),
('size', 'H'), ('atmos_atten', 'h', scaler(0.001)),
('calib_dbz0', 'f')], '>', 'ElConsts')
rad_const_fmt_v1 = NamedStruct([('type', 's'), ('name', '3s'), ('size', 'H'),
('unamb_range', 'H', scaler(0.1)),
('noise_h', 'f'), ('noise_v', 'f'),
('nyq_vel', 'H', scaler(0.01)),
(None, '2x')], '>', 'RadConstsV1')
rad_const_fmt_v2 = NamedStruct([('type', 's'), ('name', '3s'), ('size', 'H'),
('unamb_range', 'H', scaler(0.1)),
('noise_h', 'f'), ('noise_v', 'f'),
('nyq_vel', 'H', scaler(0.01)),
(None, '2x'), ('calib_dbz0_h', 'f'),
('calib_dbz0_v', 'f')], '>', 'RadConstsV2')
data_block_fmt = NamedStruct([('type', 's'), ('name', '3s'),
('reserved', 'L'), ('num_gates', 'H'),
('first_gate', 'H', scaler(0.001)),
('gate_width', 'H', scaler(0.001)),
('tover', 'H', scaler(0.1)),
('snr_thresh', 'h', scaler(0.1)),
('recombined', 'B', BitField('Azimuths', 'Gates')),
('data_size', 'B'),
('scale', 'f'), ('offset', 'f')], '>', 'DataBlockHdr')
Radial = namedtuple('Radial', 'header vol_consts elev_consts radial_consts moments')
def _decode_msg31(self, msg_hdr):
msg_start = self._buffer.set_mark()
data_hdr = self._buffer.read_struct(self.msg31_data_hdr_fmt)
if data_hdr.compression:
log.warning('Compressed message 31 not supported!')
# Read all the block pointers. While the ICD specifies that at least the vol, el, rad
# constant blocks as well as REF moment block are present, it says "the pointers are
# not order or location dependent."
radial = self.Radial(data_hdr, None, None, None, {})
block_count = 0
for ptr in self._buffer.read_binary(data_hdr.num_data_blks, '>L'):
if ptr:
block_count += 1
self._buffer.jump_to(msg_start, ptr)
info = self._buffer.get_next(6)
if info.startswith(b'RVOL'):
radial = radial._replace(
vol_consts=self._buffer.read_struct(self.msg31_vol_const_fmt))
elif info.startswith(b'RELV'):
radial = radial._replace(
elev_consts=self._buffer.read_struct(self.msg31_el_const_fmt))
elif info.startswith(b'RRAD'):
# Relies on the fact that the struct is small enough for its size
# to fit in a single byte
if int(info[-1]) == self.rad_const_fmt_v2.size:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v2)
else:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v1)
radial = radial._replace(radial_consts=rad_consts)
elif info.startswith(b'D'):
hdr = self._buffer.read_struct(self.data_block_fmt)
# TODO: The correctness of this code is not tested
vals = self._buffer.read_array(count=hdr.num_gates,
dtype=f'>u{hdr.data_size // 8}')
scaled_vals = (vals - hdr.offset) / hdr.scale
scaled_vals[vals == 0] = self.MISSING
scaled_vals[vals == 1] = self.RANGE_FOLD
radial.moments[hdr.name.strip()] = (hdr, scaled_vals)
else:
log.warning('Unknown Message 31 block type: %s', str(info[:4]))
self._add_sweep(data_hdr)
self.sweeps[-1].append(radial)
if data_hdr.num_data_blks != block_count:
log.warning('Incorrect number of blocks detected -- Got %d'
' instead of %d', block_count, data_hdr.num_data_blks)
if data_hdr.rad_length != self._buffer.offset_from(msg_start):
log.info('Padding detected in message. Length encoded as %d but offset when '
'done is %d', data_hdr.rad_length, self._buffer.offset_from(msg_start))
def _buffer_segment(self, msg_hdr):
# Add to the buffer
bufs = self._msg_buf.setdefault(msg_hdr.msg_type, {})
bufs[msg_hdr.segment_num] = self._buffer.read(2 * msg_hdr.size_hw
- self.msg_hdr_fmt.size)
# Warn for badly formatted data
if len(bufs) != msg_hdr.segment_num:
log.warning('Segment out of order (Got: %d Count: %d) for message type %d.',
msg_hdr.segment_num, len(bufs), msg_hdr.msg_type)
# If we're complete, return the full collection of data
if msg_hdr.num_segments == len(bufs):
self._msg_buf.pop(msg_hdr.msg_type)
return b''.join(bytes(item[1]) for item in sorted(bufs.items()))
else:
return None
def _add_sweep(self, hdr):
if not self.sweeps and not hdr.rad_status & START_VOLUME:
log.warning('Missed start of volume!')
if hdr.rad_status & START_ELEVATION:
self.sweeps.append([])
if len(self.sweeps) != hdr.el_num:
log.warning('Missed elevation -- Have %d but data on %d.'
' Compensating...', len(self.sweeps), hdr.el_num)
while len(self.sweeps) < hdr.el_num:
self.sweeps.append([])
def _check_size(self, msg_hdr, size):
hdr_size = msg_hdr.size_hw * 2 - self.msg_hdr_fmt.size
if size != hdr_size:
log.warning('Message type %d should be %d bytes but got %d',
msg_hdr.msg_type, size, hdr_size)
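# Minimal usage sketch for the class above (the file name is illustrative only):
#
#     f = Level2File('KTLX_example.ar2v')
#     print(f.stid, f.dt, len(f.sweeps))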
def reduce_lists(d):
"""Replace single item lists in a dictionary with the single item."""
for field in d:
old_data = d[field]
if len(old_data) == 1:
d[field] = old_data[0]
def two_comp16(val):
"""Return the two's-complement signed representation of a 16-bit unsigned integer."""
if val >> 15:
val = -(~val & 0x7fff) - 1
return val
def float16(val):
"""Convert a 16-bit floating point value to a standard Python float."""
# Fraction is 10 LSB, Exponent middle 5, and Sign the MSB
frac = val & 0x03ff
exp = (val >> 10) & 0x1F
sign = val >> 15
if exp:
value = 2 ** (exp - 16) * (1 + float(frac) / 2**10)
else:
value = float(frac) / 2**9
if sign:
value *= -1
return value
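# Quick sanity checks for the two helpers above (values chosen for illustration):
# two_comp16(0xFFFF) == -1, since the sign bit is set and the low 15 bits are all ones;
# float16(0x4000) == 1.0, because exp == 16 (the bias used here) and frac == 0.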
def float32(short1, short2):
"""Unpack a pair of 16-bit integers as a Python float."""
# Masking below in python will properly convert signed values to unsigned
return struct.unpack('>f', struct.pack('>HH', short1 & 0xFFFF, short2 & 0xFFFF))[0]
def date_elem(ind_days, ind_minutes):
"""Create a function to parse a datetime from the product-specific blocks."""
def inner(seq):
return nexrad_to_datetime(seq[ind_days], seq[ind_minutes] * 60 * 1000)
return inner
def scaled_elem(index, scale):
"""Create a function to scale a certain product-specific block."""
def inner(seq):
return seq[index] * scale
return inner
def combine_elem(ind1, ind2):
"""Create a function to combine two specified product-specific blocks into a single int."""
def inner(seq):
shift = 2**16
if seq[ind1] < 0:
seq[ind1] += shift
if seq[ind2] < 0:
seq[ind2] += shift
return (seq[ind1] << 16) | seq[ind2]
return inner
def float_elem(ind1, ind2):
"""Create a function to combine two specified product-specific blocks into a float."""
return lambda seq: float32(seq[ind1], seq[ind2])
def high_byte(ind):
"""Create a function to return the high-byte of a product-specific block."""
def inner(seq):
return seq[ind] >> 8
return inner
def low_byte(ind):
"""Create a function to return the low-byte of a product-specific block."""
def inner(seq):
return seq[ind] & 0x00FF
return inner
def delta_time(ind):
"""Create a function to return the delta time from a product-specific block."""
def inner(seq):
return seq[ind] >> 5
return inner
def supplemental_scan(ind):
"""Create a function to return the supplement scan type from a product-specific block."""
def inner(seq):
# ICD says 1->SAILS, 2->MRLE, but testing on 2020-08-17 makes this seem inverted
# given what's being reported by sites in the GSM.
return {0: 'Non-supplemental scan',
2: 'SAILS scan', 1: 'MRLE scan'}.get(seq[ind] & 0x001F, 'Unknown')
return inner
# Data mappers used to take packed data and turn into physical units
# Default is to use numpy array indexing to use LUT to change data bytes
# into physical values. Can also have a 'labels' attribute to give
# categorical labels
class DataMapper:
"""Convert packed integer data into physical units."""
# Need to find way to handle range folded
# RANGE_FOLD = -9999
RANGE_FOLD = float('nan')
MISSING = float('nan')
def __init__(self, num=256):
self.lut = np.full(num, self.MISSING, dtype=float)
def __call__(self, data):
"""Convert the values."""
return self.lut[data]
class DigitalMapper(DataMapper):
"""Maps packed integers to floats using a scale and offset from the product."""
_min_scale = 0.1
_inc_scale = 0.1
_min_data = 2
_max_data = 255
range_fold = False
def __init__(self, prod):
"""Initialize the mapper and the lookup table."""
super().__init__()
min_val = two_comp16(prod.thresholds[0]) * self._min_scale
inc = prod.thresholds[1] * self._inc_scale
num_levels = prod.thresholds[2]
# Generate lookup table -- sanity check on num_levels handles
# the fact that DHR advertises 256 levels, which *includes*
# missing, differing from other products
num_levels = min(num_levels, self._max_data - self._min_data + 1)
for i in range(num_levels):
self.lut[i + self._min_data] = min_val + i * inc
class DigitalRefMapper(DigitalMapper):
"""Mapper for digital reflectivity products."""
units = 'dBZ'
class DigitalVelMapper(DigitalMapper):
"""Mapper for digital velocity products."""
units = 'm/s'
range_fold = True
class DigitalSPWMapper(DigitalVelMapper):
"""Mapper for digital spectrum width products."""
_min_data = 129
# ICD says up to 152, but also says max value is 19, which implies 129 + 19/0.5 -> 167
_max_data = 167
class PrecipArrayMapper(DigitalMapper):
"""Mapper for precipitation array products."""
_inc_scale = 0.001
_min_data = 1
_max_data = 254
units = 'dBA'
class DigitalStormPrecipMapper(DigitalMapper):
"""Mapper for digital storm precipitation products."""
units = 'inches'
_inc_scale = 0.01
class DigitalVILMapper(DataMapper):
"""Mapper for digital VIL products."""
def __init__(self, prod):
"""Initialize the VIL mapper."""
super().__init__()
lin_scale = float16(prod.thresholds[0])
lin_offset = float16(prod.thresholds[1])
log_start = prod.thresholds[2]
log_scale = float16(prod.thresholds[3])
log_offset = float16(prod.thresholds[4])
# VIL is allowed to use 2 through 254 inclusive. 0 is thresholded,
# 1 is flagged, and 255 is reserved
ind = np.arange(255)
self.lut[2:log_start] = (ind[2:log_start] - lin_offset) / lin_scale
self.lut[log_start:-1] = np.exp((ind[log_start:] - log_offset) / log_scale)
class DigitalEETMapper(DataMapper):
"""Mapper for digital echo tops products."""
def __init__(self, prod):
"""Initialize the mapper."""
super().__init__()
data_mask = prod.thresholds[0]
scale = prod.thresholds[1]
offset = prod.thresholds[2]
topped_mask = prod.thresholds[3]
self.topped_lut = [False] * 256
for i in range(2, 256):
self.lut[i] = ((i & data_mask) - offset) / scale
self.topped_lut[i] = bool(i & topped_mask)
self.topped_lut = np.array(self.topped_lut)
def __call__(self, data_vals):
"""Convert the data values."""
return self.lut[data_vals], self.topped_lut[data_vals]
class GenericDigitalMapper(DataMapper):
"""Maps packed integers to floats using a scale and offset from the product.
Also handles special data flags.
"""
def __init__(self, prod):
"""Initialize the mapper by pulling out all the information from the product."""
# Values will be [0, max] inclusive, so need to add 1 to max value to get proper size.
max_data_val = prod.thresholds[5]
super().__init__(max_data_val + 1)
scale = float32(prod.thresholds[0], prod.thresholds[1])
offset = float32(prod.thresholds[2], prod.thresholds[3])
leading_flags = prod.thresholds[6]
trailing_flags = prod.thresholds[7]
if leading_flags > 1:
self.lut[1] = self.RANGE_FOLD
# Need to add 1 to the end of the range so that it's inclusive
for i in range(leading_flags, max_data_val - trailing_flags + 1):
self.lut[i] = (i - offset) / scale
class DigitalHMCMapper(DataMapper):
"""Mapper for hydrometeor classification products.
Handles assigning string labels based on values.
"""
labels = ['ND', 'BI', 'GC', 'IC', 'DS', 'WS', 'RA', 'HR',
'BD', 'GR', 'HA', 'LH', 'GH', 'UK', 'RF']
def __init__(self, prod):
"""Initialize the mapper."""
super().__init__()
for i in range(10, 256):
self.lut[i] = i // 10
self.lut[150] = self.RANGE_FOLD
# 156, 157
class EDRMapper(DataMapper):
"""Mapper for eddy dissipation rate products."""
def __init__(self, prod):
"""Initialize the mapper based on the product."""
data_levels = prod.thresholds[2]
super().__init__(data_levels)
scale = prod.thresholds[0] / 1000.
offset = prod.thresholds[1] / 1000.
leading_flags = prod.thresholds[3]
for i in range(leading_flags, data_levels):
self.lut[i] = scale * i + offset
class LegacyMapper(DataMapper):
"""Mapper for legacy products."""
lut_names = ['Blank', 'TH', 'ND', 'RF', 'BI', 'GC', 'IC', 'GR', 'WS',
'DS', 'RA', 'HR', 'BD', 'HA', 'UK']
def __init__(self, prod):
"""Initialize the values and labels from the product."""
# Don't worry about super() since we're using our own lut assembled sequentially
self.labels = []
self.lut = []
for t in prod.thresholds:
codes, val = t >> 8, t & 0xFF
label = ''
if codes >> 7:
label = self.lut_names[val]
if label in ('Blank', 'TH', 'ND'):
val = self.MISSING
elif label == 'RF':
val = self.RANGE_FOLD
elif codes >> 6:
val *= 0.01
label = f'{val:.2f}'
elif codes >> 5:
val *= 0.05
label = f'{val:.2f}'
elif codes >> 4:
val *= 0.1
label = f'{val:.1f}'
if codes & 0x1:
val *= -1
label = '-' + label
elif (codes >> 1) & 0x1:
label = '+' + label
if (codes >> 2) & 0x1:
label = '<' + label
elif (codes >> 3) & 0x1:
label = '>' + label
if not label:
label = str(val)
self.lut.append(val)
self.labels.append(label)
self.lut = np.array(self.lut)
@exporter.export
class Level3File:
r"""Handle reading the wide array of NEXRAD Level 3 (NIDS) product files.
This class attempts to decode every byte that is in a given product file.
It supports all of the various compression formats that exist for these
products in the wild.
Attributes
----------
metadata : dict
Various general metadata available from the product
header : namedtuple
Decoded product header
prod_desc : namedtuple
Decoded product description block
siteID : str
ID of the site found in the header, empty string if none found
lat : float
Radar site latitude
lon : float
Radar site longitude
height : float
Radar site height AMSL
product_name : str
Name of the product contained in file
max_range : float
Maximum range of the product, taken from the NIDS ICD
map_data : Mapper
Class instance mapping data int values to proper floating point values
sym_block : list, optional
Any symbology block packets that were found
tab_pages : list, optional
Any tabular pages that were found
graph_pages : list, optional
Any graphical pages that were found
Notes
-----
The internal data structure that things are decoded into is still to be
determined.
"""
ij_to_km = 0.25
wmo_finder = re.compile('((?:NX|SD|NO)US)\\d{2}[\\s\\w\\d]+\\w*(\\w{3})\r\r\n')
header_fmt = NamedStruct([('code', 'H'), ('date', 'H'), ('time', 'l'),
('msg_len', 'L'), ('src_id', 'h'), ('dest_id', 'h'),
('num_blks', 'H')], '>', 'MsgHdr')
# See figure 3-17 in 2620001 document for definition of status bit fields
gsm_fmt = NamedStruct([('divider', 'h'), ('block_len', 'H'),
('op_mode', 'h', BitField('Clear Air', 'Precip')),
('rda_op_status', 'h', BitField('Spare', 'Online',
'Maintenance Required',
'Maintenance Mandatory',
'Commanded Shutdown', 'Inoperable',
'Spare', 'Wideband Disconnect')),
('vcp', 'h'), ('num_el', 'h'),
('el1', 'h', scaler(0.1)), ('el2', 'h', scaler(0.1)),
('el3', 'h', scaler(0.1)), ('el4', 'h', scaler(0.1)),
('el5', 'h', scaler(0.1)), ('el6', 'h', scaler(0.1)),
('el7', 'h', scaler(0.1)), ('el8', 'h', scaler(0.1)),
('el9', 'h', scaler(0.1)), ('el10', 'h', scaler(0.1)),
('el11', 'h', scaler(0.1)), ('el12', 'h', scaler(0.1)),
('el13', 'h', scaler(0.1)), ('el14', 'h', scaler(0.1)),
('el15', 'h', scaler(0.1)), ('el16', 'h', scaler(0.1)),
('el17', 'h', scaler(0.1)), ('el18', 'h', scaler(0.1)),
('el19', 'h', scaler(0.1)), ('el20', 'h', scaler(0.1)),
('rda_status', 'h', BitField('Spare', 'Startup', 'Standby',
'Restart', 'Operate',
'Off-line Operate')),
('rda_alarms', 'h', BitField('Indeterminate', 'Tower/Utilities',
'Pedestal', 'Transmitter', 'Receiver',
'RDA Control', 'RDA Communications',
'Signal Processor')),
('tranmission_enable', 'h', BitField('Spare', 'None',
'Reflectivity',
'Velocity', 'Spectrum Width',
'Dual Pol')),
('rpg_op_status', 'h', BitField('Loadshed', 'Online',
'Maintenance Required',
'Maintenance Mandatory',
'Commanded shutdown')),
('rpg_alarms', 'h', BitField('None', 'Node Connectivity',
'Wideband Failure',
'RPG Control Task Failure',
'Data Base Failure', 'Spare',
'RPG Input Buffer Loadshed',
'Spare', 'Product Storage Loadshed',
'Spare', 'Spare', 'Spare',
'RPG/RPG Intercomputer Link Failure',
'Redundant Channel Error',
'Task Failure', 'Media Failure')),
('rpg_status', 'h', BitField('Restart', 'Operate', 'Standby')),
('rpg_narrowband_status', 'h', BitField('Commanded Disconnect',
'Narrowband Loadshed')),
('h_ref_calib', 'h', scaler(0.25)),
('prod_avail', 'h', BitField('Product Availability',
'Degraded Availability',
'Not Available')),
('super_res_cuts', 'h', Bits(16)),
('cmd_status', 'h', Bits(6)),
('v_ref_calib', 'h', scaler(0.25)),
('rda_build', 'h', version), ('rda_channel', 'h'),
('reserved', 'h'), ('reserved2', 'h'),
('build_version', 'h', version)], '>', 'GSM')
# Build 14.0 added more bytes to the GSM
additional_gsm_fmt = NamedStruct([('el21', 'h', scaler(0.1)),
('el22', 'h', scaler(0.1)),
('el23', 'h', scaler(0.1)),
('el24', 'h', scaler(0.1)),
('el25', 'h', scaler(0.1)),
('vcp_supplemental', 'H',
BitField('AVSET', 'SAILS', 'Site VCP', 'RxR Noise',
'CBT', 'VCP Sequence', 'SPRT', 'MRLE',
'Base Tilt', 'MPDA')),
('supplemental_cut_map', 'H', Bits(16)),
('supplemental_cut_count', 'B'),
('supplemental_cut_map2', 'B', Bits(8)),
('spare', '80s')], '>', 'GSM')
prod_desc_fmt = NamedStruct([('divider', 'h'), ('lat', 'l'), ('lon', 'l'),
('height', 'h'), ('prod_code', 'h'),
('op_mode', 'h'), ('vcp', 'h'), ('seq_num', 'h'),
('vol_num', 'h'), ('vol_date', 'h'),
('vol_start_time', 'l'), ('prod_gen_date', 'h'),
('prod_gen_time', 'l'), ('dep1', 'h'),
('dep2', 'h'), ('el_num', 'h'), ('dep3', 'h'),
('thr1', 'h'), ('thr2', 'h'), ('thr3', 'h'),
('thr4', 'h'), ('thr5', 'h'), ('thr6', 'h'),
('thr7', 'h'), ('thr8', 'h'), ('thr9', 'h'),
('thr10', 'h'), ('thr11', 'h'), ('thr12', 'h'),
('thr13', 'h'), ('thr14', 'h'), ('thr15', 'h'),
('thr16', 'h'), ('dep4', 'h'), ('dep5', 'h'),
('dep6', 'h'), ('dep7', 'h'), ('dep8', 'h'),
('dep9', 'h'), ('dep10', 'h'), ('version', 'b'),
('spot_blank', 'b'), ('sym_off', 'L'), ('graph_off', 'L'),
('tab_off', 'L')], '>', 'ProdDesc')
sym_block_fmt = NamedStruct([('divider', 'h'), ('block_id', 'h'),
('block_len', 'L'), ('nlayer', 'H')], '>', 'SymBlock')
tab_header_fmt = NamedStruct([('divider', 'h'), ('block_id', 'h'),
('block_len', 'L')], '>', 'TabHeader')
tab_block_fmt = NamedStruct([('divider', 'h'), ('num_pages', 'h')], '>', 'TabBlock')
sym_layer_fmt = NamedStruct([('divider', 'h'), ('length', 'L')], '>',
'SymLayer')
graph_block_fmt = NamedStruct([('divider', 'h'), ('block_id', 'h'),
('block_len', 'L'), ('num_pages', 'H')], '>', 'GraphBlock')
standalone_tabular = [62, 73, 75, 82]
prod_spec_map = {16: ('Base Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
17: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
18: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
19: ('Base Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),
('calib_const', float_elem(7, 8)))),
20: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),
('calib_const', float_elem(7, 8)))),
21: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
22: ('Base Velocity', 60., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
23: ('Base Velocity', 115., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
24: ('Base Velocity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
25: ('Base Velocity', 60., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
26: ('Base Velocity', 115., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
27: ('Base Velocity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4),
('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)))),
28: ('Base Spectrum Width', 60., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
29: ('Base Spectrum Width', 115., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
30: ('Base Spectrum Width', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)))),
31: ('User Selectable Storm Total Precipitation', 230., LegacyMapper,
(('end_hour', 0),
('hour_span', 1),
('null_product', 2),
('max_rainfall', scaled_elem(3, 0.1)),
('rainfall_begin', date_elem(4, 5)),
('rainfall_end', date_elem(6, 7)),
('bias', scaled_elem(8, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)))),
32: ('Digital Hybrid Scan Reflectivity', 230., DigitalRefMapper,
(('max', 3),
('avg_time', date_elem(4, 5)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
33: ('Hybrid Scan Reflectivity', 230., LegacyMapper,
(('max', 3), ('avg_time', date_elem(4, 5)))),
34: ('Clutter Filter Control', 230., LegacyMapper,
(('clutter_bitmap', 0),
('cmd_map', 1),
('bypass_map_date', date_elem(4, 5)),
('notchwidth_map_date', date_elem(6, 7)))),
35: ('Composite Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
36: ('Composite Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
37: ('Composite Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
38: ('Composite Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
41: ('Echo Tops', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', scaled_elem(3, 1000)))), # Max in ft
48: ('VAD Wind Profile', None, LegacyMapper,
(('max', 3),
('dir_max', 4),
('alt_max', scaled_elem(5, 10)))), # Max in ft
50: ('Cross Section Reflectivity', 230., LegacyMapper,
(('azimuth1', scaled_elem(0, 0.1)),
('range1', scaled_elem(1, 0.1)),
('azimuth2', scaled_elem(2, 0.1)),
('range2', scaled_elem(3, 0.1)))),
51: ('Cross Section Velocity', 230., LegacyMapper,
(('azimuth1', scaled_elem(0, 0.1)),
('range1', scaled_elem(1, 0.1)),
('azimuth2', scaled_elem(2, 0.1)),
('range2', scaled_elem(3, 0.1)))),
55: ('Storm Relative Mean Radial Velocity', 50., LegacyMapper,
(('window_az', scaled_elem(0, 0.1)),
('window_range', scaled_elem(1, 0.1)),
('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('source', 5),
('height', 6),
('avg_speed', scaled_elem(7, 0.1)),
('avg_dir', scaled_elem(8, 0.1)),
('alert_category', 9))),
56: ('Storm Relative Mean Radial Velocity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('source', 5),
('avg_speed', scaled_elem(7, 0.1)),
('avg_dir', scaled_elem(8, 0.1)))),
57: ('Vertically Integrated Liquid', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))), # Max in kg / m^2
58: ('Storm Tracking Information', 460., LegacyMapper,
(('num_storms', 3),)),
59: ('Hail Index', 230., LegacyMapper, ()),
61: ('Tornado Vortex Signature', 230., LegacyMapper,
(('num_tvs', 3), ('num_etvs', 4))),
62: ('Storm Structure', 460., LegacyMapper, ()),
63: ('Layer Composite Reflectivity (Layer 1 Average)', 230., LegacyMapper,
(('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
64: ('Layer Composite Reflectivity (Layer 2 Average)', 230., LegacyMapper,
(('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
65: ('Layer Composite Reflectivity (Layer 1 Max)', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
66: ('Layer Composite Reflectivity (Layer 2 Max)', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
67: ('Layer Composite Reflectivity - AP Removed', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
74: ('Radar Coded Message', 460., LegacyMapper, ()),
78: ('Surface Rainfall Accumulation (1 hour)', 230., LegacyMapper,
(('max_rainfall', scaled_elem(3, 0.1)),
('bias', scaled_elem(4, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)),
('rainfall_end', date_elem(6, 7)))),
79: ('Surface Rainfall Accumulation (3 hour)', 230., LegacyMapper,
(('max_rainfall', scaled_elem(3, 0.1)),
('bias', scaled_elem(4, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)),
('rainfall_end', date_elem(6, 7)))),
80: ('Storm Total Rainfall Accumulation', 230., LegacyMapper,
(('max_rainfall', scaled_elem(3, 0.1)),
('rainfall_begin', date_elem(4, 5)),
('rainfall_end', date_elem(6, 7)),
('bias', scaled_elem(8, 0.01)),
('gr_pairs', scaled_elem(9, 0.01)))),
81: ('Hourly Digital Precipitation Array', 230., PrecipArrayMapper,
(('max_rainfall', scaled_elem(3, 0.001)),
('bias', scaled_elem(4, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)),
('rainfall_end', date_elem(6, 7)))),
82: ('Supplemental Precipitation Data', None, LegacyMapper, ()),
89: ('Layer Composite Reflectivity (Layer 3 Average)', 230., LegacyMapper,
(('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
90: ('Layer Composite Reflectivity (Layer 3 Max)', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
93: ('ITWS Digital Base Velocity', 115., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4), ('precision', 6))),
94: ('Base Reflectivity Data Array', 460., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
95: ('Composite Reflectivity Edited for AP', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
96: ('Composite Reflectivity Edited for AP', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
97: ('Composite Reflectivity Edited for AP', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
98: ('Composite Reflectivity Edited for AP', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
99: ('Base Velocity Data Array', 300., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
113: ('Power Removed Control', 300., LegacyMapper,
(('rpg_cut_num', 0), ('cmd_generated', 1),
('el_angle', scaled_elem(2, 0.1)),
('clutter_filter_map_dt', date_elem(4, 3)),
# While the 2620001Y ICD doesn't talk about using these
# product-specific blocks for this product, they have data in them
# and the compression info is necessary for proper decoding.
('compression', 7), ('uncompressed_size', combine_elem(8, 9)))),
132: ('Clutter Likelihood Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),)),
133: ('Clutter Likelihood Doppler', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),)),
134: ('High Resolution VIL', 460., DigitalVILMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('num_edited', 4),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
135: ('Enhanced Echo Tops', 345., DigitalEETMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', scaled_elem(3, 1000.)), # Max in ft
('num_edited', 4),
('ref_thresh', 5),
('points_removed', 6),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
138: ('Digital Storm Total Precipitation', 230., DigitalStormPrecipMapper,
(('rainfall_begin', date_elem(0, 1)),
('bias', scaled_elem(2, 0.01)),
('max', scaled_elem(3, 0.01)),
('rainfall_end', date_elem(4, 5)),
('gr_pairs', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
141: ('Mesocyclone Detection', 230., LegacyMapper,
(('min_ref_thresh', 0),
('overlap_display_filter', 1),
('min_strength_rank', 2))),
152: ('Archive III Status Product', None, LegacyMapper,
(('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
153: ('Super Resolution Reflectivity Data Array', 460., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
154: ('Super Resolution Velocity Data Array', 300., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
155: ('Super Resolution Spectrum Width Data Array', 300.,
DigitalSPWMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
156: ('Turbulence Detection (Eddy Dissipation Rate)', 230., EDRMapper,
(('el_start_time', 0),
('el_end_time', 1),
('el_angle', scaled_elem(2, 0.1)),
('min_el', scaled_elem(3, 0.01)),
('mean_el', scaled_elem(4, 0.01)),
('max_el', scaled_elem(5, 0.01)))),
157: ('Turbulence Detection (Eddy Dissipation Rate Confidence)', 230.,
EDRMapper,
(('el_start_time', 0),
('el_end_time', 1),
('el_angle', scaled_elem(2, 0.1)),
('min_el', scaled_elem(3, 0.01)),
('mean_el', scaled_elem(4, 0.01)),
('max_el', scaled_elem(5, 0.01)))),
158: ('Differential Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.1)),
('max', scaled_elem(4, 0.1)))),
159: ('Digital Differential Reflectivity', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.1)),
('max', scaled_elem(4, 0.1)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
160: ('Correlation Coefficient', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.00333)),
('max', scaled_elem(4, 0.00333)))),
161: ('Digital Correlation Coefficient', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.00333)),
('max', scaled_elem(4, 0.00333)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
162: ('Specific Differential Phase', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.05)),
('max', scaled_elem(4, 0.05)))),
163: ('Digital Specific Differential Phase', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.05)),
('max', scaled_elem(4, 0.05)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
164: ('Hydrometeor Classification', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),)),
165: ('Digital Hydrometeor Classification', 300., DigitalHMCMapper,
(('el_angle', scaled_elem(2, 0.1)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
166: ('Melting Layer', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)),)),
167: ('Super Res Digital Correlation Coefficient', 300.,
GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.00333)),
('max', scaled_elem(4, 0.00333)), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
168: ('Super Res Digital Phi', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4), ('delta_time', delta_time(6)),
('supplemental_scan', supplemental_scan(6)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
169: ('One Hour Accumulation', 230., LegacyMapper,
(('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('gr_pairs', scaled_elem(7, 0.01)))),
170: ('Digital Accumulation Array', 230., GenericDigitalMapper,
(('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
171: ('Storm Total Accumulation', 230., LegacyMapper,
(('rainfall_begin', date_elem(0, 1)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('gr_pairs', scaled_elem(7, 0.01)))),
172: ('Digital Storm Total Accumulation', 230., GenericDigitalMapper,
(('rainfall_begin', date_elem(0, 1)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
173: ('Digital User-Selectable Accumulation', 230., GenericDigitalMapper,
(('period', 1),
('missing_period', high_byte(2)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 0)),
('start_time', 5),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
174: ('Digital One-Hour Difference Accumulation', 230.,
GenericDigitalMapper,
(('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('min', scaled_elem(6, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
175: ('Digital Storm Total Difference Accumulation', 230.,
GenericDigitalMapper,
(('rainfall_begin', date_elem(0, 1)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('min', scaled_elem(6, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
176: ('Digital Instantaneous Precipitation Rate', 230.,
GenericDigitalMapper,
(('rainfall_begin', date_elem(0, 1)),
('precip_detected', high_byte(2)),
('need_bias', low_byte(2)),
('max', 3),
('percent_filled', scaled_elem(4, 0.01)),
('max_elev', scaled_elem(5, 0.1)),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
177: ('Hybrid Hydrometeor Classification', 230., DigitalHMCMapper,
(('mode_filter_size', 3),
('hybrid_percent_filled', 4),
('max_elev', scaled_elem(5, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
180: ('TDWR Base Reflectivity', 90., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
181: ('TDWR Base Reflectivity', 90., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
182: ('TDWR Base Velocity', 90., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
183: ('TDWR Base Velocity', 90., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4))),
185: ('TDWR Base Spectrum Width', 90., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
186: ('TDWR Long Range Base Reflectivity', 416., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
187: ('TDWR Long Range Base Reflectivity', 416., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3)))}
def __init__(self, filename):
r"""Create instance of `Level3File`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. If file-like object,
this will be read from directly.
"""
fobj = open_as_needed(filename)
if isinstance(filename, str):
self.filename = filename
elif isinstance(filename, pathlib.Path):
self.filename = str(filename)
else:
self.filename = 'No File'
# Just read in the entire set of data at once
with contextlib.closing(fobj):
self._buffer = IOBuffer.fromfile(fobj)
# Pop off the WMO header if we find it
self._process_wmo_header()
# Pop off last 4 bytes if necessary
self._process_end_bytes()
# Set up places to store data and metadata
self.metadata = {}
# Handle free text message products that are pure text
if self.wmo_code == 'NOUS':
self.header = None
self.prod_desc = None
self.thresholds = None
self.depVals = None
self.product_name = 'Free Text Message'
self.text = ''.join(self._buffer.read_ascii())
return
# Decompress the data if necessary, and if so, pop off new header
self._buffer = IOBuffer(self._buffer.read_func(zlib_decompress_all_frames))
self._process_wmo_header()
# Check for empty product
if len(self._buffer) == 0:
log.warning('%s: Empty product!', self.filename)
return
# Unpack the message header and the product description block
msg_start = self._buffer.set_mark()
self.header = self._buffer.read_struct(self.header_fmt)
log.debug('Buffer size: %d (%d expected) Header: %s', len(self._buffer),
self.header.msg_len, self.header)
if not self._buffer.check_remains(self.header.msg_len - self.header_fmt.size):
log.warning('Product contains an unexpected amount of data remaining--have: %d '
'expected: %d. This product may not parse correctly.',
len(self._buffer) - self._buffer._offset,
self.header.msg_len - self.header_fmt.size)
# Handle GSM and jump out
if self.header.code == 2:
self.gsm = self._buffer.read_struct(self.gsm_fmt)
self.product_name = 'General Status Message'
assert self.gsm.divider == -1
if self.gsm.block_len > 82:
# Due to the way the structures read it in, one bit from the count needs
# to be popped off and added as the supplemental cut status for the 25th
# elevation cut.
more = self._buffer.read_struct(self.additional_gsm_fmt)
cut_count = more.supplemental_cut_count
more.supplemental_cut_map2.append(bool(cut_count & 0x1))
self.gsm_additional = more._replace(supplemental_cut_count=cut_count >> 1)
assert self.gsm.block_len == 178
else:
assert self.gsm.block_len == 82
return
self.prod_desc = self._buffer.read_struct(self.prod_desc_fmt)
log.debug('Product description block: %s', self.prod_desc)
# Convert thresholds and dependent values to lists of values
self.thresholds = [getattr(self.prod_desc, f'thr{i}') for i in range(1, 17)]
self.depVals = [getattr(self.prod_desc, f'dep{i}') for i in range(1, 11)]
# Set up some time/location metadata
self.metadata['msg_time'] = nexrad_to_datetime(self.header.date,
self.header.time * 1000)
self.metadata['vol_time'] = nexrad_to_datetime(self.prod_desc.vol_date,
self.prod_desc.vol_start_time * 1000)
self.metadata['prod_time'] = nexrad_to_datetime(self.prod_desc.prod_gen_date,
self.prod_desc.prod_gen_time * 1000)
self.lat = self.prod_desc.lat * 0.001
self.lon = self.prod_desc.lon * 0.001
self.height = self.prod_desc.height
# Handle product-specific blocks. Default to compression and elevation angle
# Also get other product specific information, like name,
# maximum range, and how to map data bytes to values
default = ('Unknown Product', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)), ('defaultVals', 0)))
self.product_name, self.max_range, mapper, meta = self.prod_spec_map.get(
self.header.code, default)
log.debug('Product info--name: %s max_range: %f mapper: %s metadata: %s',
self.product_name, self.max_range, mapper, meta)
for name, block in meta:
if callable(block):
self.metadata[name] = block(self.depVals)
else:
self.metadata[name] = self.depVals[block]
# Now that we have the header, we have everything needed to make tables
# Store as class that can be called
self.map_data = mapper(self)
# Process compression if indicated. We need to fail
# gracefully here since we default to it being on
if self.metadata.get('compression', False):
try:
comp_start = self._buffer.set_mark()
decomp_data = self._buffer.read_func(bz2.decompress)
self._buffer.splice(comp_start, decomp_data)
assert self._buffer.check_remains(self.metadata['uncompressed_size'])
except OSError:
# Compression didn't work, so we just assume it wasn't actually compressed.
pass
# Unpack the various blocks, if present. The factor of 2 converts from
# 'half-words' to bytes
# Check to see if this is one of the "special" products that uses
# header-free blocks and re-assigns the offsets
if self.header.code in self.standalone_tabular:
if self.prod_desc.sym_off:
# For standalone tabular alphanumeric, symbology offset is
# actually tabular
self._unpack_tabblock(msg_start, 2 * self.prod_desc.sym_off, False)
if self.prod_desc.graph_off:
# Offset seems to be off by 1 from where we're counting, but
# it's not clear why.
self._unpack_standalone_graphblock(msg_start,
2 * (self.prod_desc.graph_off - 1))
# Need special handling for (old) radar coded message format
elif self.header.code == 74:
self._unpack_rcm(msg_start, 2 * self.prod_desc.sym_off)
else:
if self.prod_desc.sym_off:
self._unpack_symblock(msg_start, 2 * self.prod_desc.sym_off)
if self.prod_desc.graph_off:
self._unpack_graphblock(msg_start, 2 * self.prod_desc.graph_off)
if self.prod_desc.tab_off:
self._unpack_tabblock(msg_start, 2 * self.prod_desc.tab_off)
if 'defaultVals' in self.metadata:
log.warning('%s: Using default metadata for product %d',
self.filename, self.header.code)
def _process_wmo_header(self):
# Read off the WMO header if necessary
data = self._buffer.get_next(64).decode('ascii', 'ignore')
match = self.wmo_finder.search(data)
log.debug('WMO Header: %s', match)
if match:
self.wmo_code = match.groups()[0]
self.siteID = match.groups()[-1]
self._buffer.skip(match.end())
else:
self.wmo_code = ''
def _process_end_bytes(self):
check_bytes = self._buffer[-4:-1]
log.debug('End Bytes: %s', check_bytes)
if check_bytes in (b'\r\r\n', b'\xff\xff\n'):
self._buffer.truncate(4)
@staticmethod
def _unpack_rle_data(data):
# Unpack Run-length encoded data
unpacked = []
for run in data:
num, val = run >> 4, run & 0x0F
unpacked.extend([val] * num)
return unpacked
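# Worked example (illustrative, not from the ICD): the RLE byte 0x3A carries a
# run length of 3 in its high nibble and the value 0xA in its low nibble, so
# _unpack_rle_data([0x3A, 0x25]) expands to [10, 10, 10, 5, 5].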
@staticmethod
def pos_scale(is_sym_block):
"""Scale of the position information in km."""
return 0.25 if is_sym_block else 1
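# Note derived from the scale above (values illustrative): symbology-block
# coordinates arrive in quarter-km units, so a raw value of 460 maps to 115 km,
# while graphical-block coordinates are already in km (scale of 1).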
def _unpack_rcm(self, start, offset):
self._buffer.jump_to(start, offset)
header = self._buffer.read_ascii(10)
assert header == '1234 ROBUU'
text_data = self._buffer.read_ascii()
end = 0
# Appendix B of ICD tells how to interpret this stuff, but that just
# doesn't seem worth it.
for marker, name in [('AA', 'ref'), ('BB', 'vad'), ('CC', 'remarks')]:
start = text_data.find('/NEXR' + marker, end)
# For part C the search for end fails, but returns -1, which works
end = text_data.find('/END' + marker, start)
setattr(self, 'rcm_' + name, text_data[start:end])
def _unpack_symblock(self, start, offset):
self._buffer.jump_to(start, offset)
blk = self._buffer.read_struct(self.sym_block_fmt)
log.debug('Symbology block info: %s', blk)
self.sym_block = []
assert blk.divider == -1, ('Bad divider for symbology block: {:d} should be -1'
.format(blk.divider))
assert blk.block_id == 1, ('Bad block ID for symbology block: {:d} should be 1'
.format(blk.block_id))
for _ in range(blk.nlayer):
layer_hdr = self._buffer.read_struct(self.sym_layer_fmt)
assert layer_hdr.divider == -1
layer = []
self.sym_block.append(layer)
layer_start = self._buffer.set_mark()
while self._buffer.offset_from(layer_start) < layer_hdr.length:
packet_code = self._buffer.read_int(2, 'big', signed=False)
if packet_code in self.packet_map:
layer.append(self.packet_map[packet_code](self, packet_code, True))
else:
log.warning('%s: Unknown symbology packet type %d/%x.',
self.filename, packet_code, packet_code)
self._buffer.jump_to(layer_start, layer_hdr.length)
assert self._buffer.offset_from(layer_start) == layer_hdr.length
def _unpack_graphblock(self, start, offset):
self._buffer.jump_to(start, offset)
hdr = self._buffer.read_struct(self.graph_block_fmt)
assert hdr.divider == -1, ('Bad divider for graphical block: {:d} should be -1'
.format(hdr.divider))
assert hdr.block_id == 2, ('Bad block ID for graphical block: {:d} should be 2'
.format(hdr.block_id))
self.graph_pages = []
for page in range(hdr.num_pages):
page_num = self._buffer.read_int(2, 'big', signed=False)
assert page + 1 == page_num
page_size = self._buffer.read_int(2, 'big', signed=False)
page_start = self._buffer.set_mark()
packets = []
while self._buffer.offset_from(page_start) < page_size:
packet_code = self._buffer.read_int(2, 'big', signed=False)
if packet_code in self.packet_map:
packets.append(self.packet_map[packet_code](self, packet_code, False))
else:
log.warning('%s: Unknown graphical packet type %d/%x.',
self.filename, packet_code, packet_code)
self._buffer.skip(page_size)
self.graph_pages.append(packets)
def _unpack_standalone_graphblock(self, start, offset):
self._buffer.jump_to(start, offset)
packets = []
while not self._buffer.at_end():
packet_code = self._buffer.read_int(2, 'big', signed=False)
if packet_code in self.packet_map:
packets.append(self.packet_map[packet_code](self, packet_code, False))
else:
log.warning('%s: Unknown standalone graphical packet type %d/%x.',
self.filename, packet_code, packet_code)
# Assume the next 2 bytes are the packet length and try skipping
num_bytes = self._buffer.read_int(2, 'big', signed=False)
self._buffer.skip(num_bytes)
self.graph_pages = [packets]
def _unpack_tabblock(self, start, offset, have_header=True):
self._buffer.jump_to(start, offset)
block_start = self._buffer.set_mark()
# Read the header and validate if needed
if have_header:
header = self._buffer.read_struct(self.tab_header_fmt)
assert header.divider == -1
assert header.block_id == 3
# Read off secondary message and product description blocks,
# but as far as I can tell, all we really need is the text that follows
self._buffer.read_struct(self.header_fmt)
self._buffer.read_struct(self.prod_desc_fmt)
# Get the start of the block with number of pages and divider
blk = self._buffer.read_struct(self.tab_block_fmt)
assert blk.divider == -1
# Read the pages line by line, break pages on a -1 character count
self.tab_pages = []
for _ in range(blk.num_pages):
lines = []
num_chars = self._buffer.read_int(2, 'big', signed=True)
while num_chars != -1:
lines.append(''.join(self._buffer.read_ascii(num_chars)))
num_chars = self._buffer.read_int(2, 'big', signed=True)
self.tab_pages.append('\n'.join(lines))
if have_header:
assert self._buffer.offset_from(block_start) == header.block_len
def __repr__(self):
"""Return the string representation of the product."""
attrs = ('product_name', 'header', 'prod_desc', 'thresholds', 'depVals', 'metadata',
'gsm', 'gsm_additional', 'siteID')
blocks = [str(getattr(self, name)) for name in attrs if hasattr(self, name)]
return self.filename + ': ' + '\n'.join(blocks)
def _unpack_packet_radial_data(self, code, in_sym_block):
hdr_fmt = NamedStruct([('ind_first_bin', 'H'), ('nbins', 'H'),
('i_center', 'h'), ('j_center', 'h'),
('scale_factor', 'h'), ('num_rad', 'H')],
'>', 'RadialHeader')
rad_fmt = NamedStruct([('num_hwords', 'H'), ('start_angle', 'h'),
('angle_delta', 'h')], '>', 'RadialData')
hdr = self._buffer.read_struct(hdr_fmt)
rads = []
for _ in range(hdr.num_rad):
rad = self._buffer.read_struct(rad_fmt)
start_az = rad.start_angle * 0.1
end_az = start_az + rad.angle_delta * 0.1
rads.append((start_az, end_az,
self._unpack_rle_data(
self._buffer.read_binary(2 * rad.num_hwords))))
start, end, vals = zip(*rads)
return {'start_az': list(start), 'end_az': list(end), 'data': list(vals),
'center': (hdr.i_center * self.pos_scale(in_sym_block),
hdr.j_center * self.pos_scale(in_sym_block)),
'gate_scale': hdr.scale_factor * 0.001, 'first': hdr.ind_first_bin}
digital_radial_hdr_fmt = NamedStruct([('ind_first_bin', 'H'), ('nbins', 'H'),
('i_center', 'h'), ('j_center', 'h'),
('scale_factor', 'h'), ('num_rad', 'H')],
'>', 'DigitalRadialHeader')
digital_radial_fmt = NamedStruct([('num_bytes', 'H'), ('start_angle', 'h'),
('angle_delta', 'h')], '>', 'DigitalRadialData')
def _unpack_packet_digital_radial(self, code, in_sym_block):
hdr = self._buffer.read_struct(self.digital_radial_hdr_fmt)
rads = []
for _ in range(hdr.num_rad):
rad = self._buffer.read_struct(self.digital_radial_fmt)
start_az = rad.start_angle * 0.1
end_az = start_az + rad.angle_delta * 0.1
rads.append((start_az, end_az, self._buffer.read_binary(rad.num_bytes)))
start, end, vals = zip(*rads)
return {'start_az': list(start), 'end_az': list(end), 'data': list(vals),
'center': (hdr.i_center * self.pos_scale(in_sym_block),
hdr.j_center * self.pos_scale(in_sym_block)),
'gate_scale': hdr.scale_factor * 0.001, 'first': hdr.ind_first_bin}
def _unpack_packet_raster_data(self, code, in_sym_block):
hdr_fmt = NamedStruct([('code', 'L'),
('i_start', 'h'), ('j_start', 'h'), # start in km/4
('xscale_int', 'h'), ('xscale_frac', 'h'),
('yscale_int', 'h'), ('yscale_frac', 'h'),
('num_rows', 'h'), ('packing', 'h')], '>', 'RasterData')
hdr = self._buffer.read_struct(hdr_fmt)
assert hdr.code == 0x800000C0
assert hdr.packing == 2
rows = []
for _ in range(hdr.num_rows):
num_bytes = self._buffer.read_int(2, 'big', signed=False)
rows.append(self._unpack_rle_data(self._buffer.read_binary(num_bytes)))
return {'start_x': hdr.i_start * hdr.xscale_int,
'start_y': hdr.j_start * hdr.yscale_int, 'data': rows}
def _unpack_packet_uniform_text(self, code, in_sym_block):
# By not using a struct, we can handle multiple codes
num_bytes = self._buffer.read_int(2, 'big', signed=False)
if code == 8:
value = self._buffer.read_int(2, 'big', signed=False)
read_bytes = 6
else:
value = None
read_bytes = 4
i_start = self._buffer.read_int(2, 'big', signed=True)
j_start = self._buffer.read_int(2, 'big', signed=True)
# Text is what remains beyond what's been read, not including byte count
text = ''.join(self._buffer.read_ascii(num_bytes - read_bytes))
return {'x': i_start * self.pos_scale(in_sym_block),
'y': j_start * self.pos_scale(in_sym_block), 'color': value, 'text': text}
def _unpack_packet_special_text_symbol(self, code, in_sym_block):
d = self._unpack_packet_uniform_text(code, in_sym_block)
# Translate special characters to their meaning
ret = {}
symbol_map = {'!': 'past storm position', '"': 'current storm position',
'#': 'forecast storm position', '$': 'past MDA position',
'%': 'forecast MDA position', ' ': None}
# Use this meaning as the key in the returned packet
for c in d['text']:
if c not in symbol_map:
log.warning('%s: Unknown special symbol %d/%x.', self.filename, ord(c), ord(c))
else:
key = symbol_map[c]
if key:
ret[key] = d['x'], d['y']
del d['text']
return ret
def _unpack_packet_special_graphic_symbol(self, code, in_sym_block):
type_map = {3: 'Mesocyclone', 11: '3D Correlated Shear', 12: 'TVS',
26: 'ETVS', 13: 'Positive Hail', 14: 'Probable Hail',
15: 'Storm ID', 19: 'HDA', 25: 'STI Circle'}
point_feature_map = {1: 'Mesocyclone (ext.)', 3: 'Mesocyclone',
5: 'TVS (Ext.)', 6: 'ETVS (Ext.)', 7: 'TVS',
8: 'ETVS', 9: 'MDA', 10: 'MDA (Elev.)', 11: 'MDA (Weak)'}
# Read the number of bytes and set a mark for sanity checking
num_bytes = self._buffer.read_int(2, 'big', signed=False)
packet_data_start = self._buffer.set_mark()
scale = self.pos_scale(in_sym_block)
# Loop over the bytes we have
ret = defaultdict(list)
while self._buffer.offset_from(packet_data_start) < num_bytes:
# Read position
ret['x'].append(self._buffer.read_int(2, 'big', signed=True) * scale)
ret['y'].append(self._buffer.read_int(2, 'big', signed=True) * scale)
# Handle any types that have additional info
if code in (3, 11, 25):
ret['radius'].append(self._buffer.read_int(2, 'big', signed=True) * scale)
elif code == 15:
ret['id'].append(''.join(self._buffer.read_ascii(2)))
elif code == 19:
ret['POH'].append(self._buffer.read_int(2, 'big', signed=True))
ret['POSH'].append(self._buffer.read_int(2, 'big', signed=True))
ret['Max Size'].append(self._buffer.read_int(2, 'big', signed=False))
elif code == 20:
kind = self._buffer.read_int(2, 'big', signed=False)
attr = self._buffer.read_int(2, 'big', signed=False)
if kind < 5 or kind > 8:
ret['radius'].append(attr * scale)
if kind not in point_feature_map:
log.warning('%s: Unknown graphic symbol point kind %d/%x.',
self.filename, kind, kind)
ret['type'].append(f'Unknown ({kind:d})')
else:
ret['type'].append(point_feature_map[kind])
# Map the code to a name for this type of symbol
if code != 20:
if code not in type_map:
log.warning('%s: Unknown graphic symbol type %d/%x.',
self.filename, code, code)
ret['type'] = 'Unknown'
else:
ret['type'] = type_map[code]
# Check and return
assert self._buffer.offset_from(packet_data_start) == num_bytes
# Reduce dimensions of lists if possible
reduce_lists(ret)
return ret
def _unpack_packet_scit(self, code, in_sym_block):
num_bytes = self._buffer.read_int(2, 'big', signed=False)
packet_data_start = self._buffer.set_mark()
ret = defaultdict(list)
while self._buffer.offset_from(packet_data_start) < num_bytes:
next_code = self._buffer.read_int(2, 'big', signed=False)
if next_code not in self.packet_map:
log.warning('%s: Unknown packet in SCIT %d/%x.',
self.filename, next_code, next_code)
self._buffer.jump_to(packet_data_start, num_bytes)
return ret
else:
next_packet = self.packet_map[next_code](self, next_code, in_sym_block)
if next_code == 6:
ret['track'].append(next_packet['vectors'])
elif next_code == 25:
ret['STI Circle'].append(next_packet)
elif next_code == 2:
ret['markers'].append(next_packet)
else:
log.warning('%s: Unsupported packet in SCIT %d/%x.',
self.filename, next_code, next_code)
ret['data'].append(next_packet)
reduce_lists(ret)
return ret
def _unpack_packet_digital_precipitation(self, code, in_sym_block):
# Read off a couple of unused spares
self._buffer.read_int(2, 'big', signed=False)
self._buffer.read_int(2, 'big', signed=False)
# Get the size of the grid
lfm_boxes = self._buffer.read_int(2, 'big', signed=False)
num_rows = self._buffer.read_int(2, 'big', signed=False)
rows = []
# Read off each row and decode the RLE data
for _ in range(num_rows):
row_num_bytes = self._buffer.read_int(2, 'big', signed=False)
row_bytes = self._buffer.read_binary(row_num_bytes)
if code == 18:
row = self._unpack_rle_data(row_bytes)
else:
row = []
for run, level in zip(row_bytes[::2], row_bytes[1::2]):
row.extend([level] * run)
assert len(row) == lfm_boxes
rows.append(row)
return {'data': rows}
def _unpack_packet_linked_vector(self, code, in_sym_block):
num_bytes = self._buffer.read_int(2, 'big', signed=True)
if code == 9:
value = self._buffer.read_int(2, 'big', signed=True)
num_bytes -= 2
else:
value = None
scale = self.pos_scale(in_sym_block)
pos = [b * scale for b in self._buffer.read_binary(num_bytes // 2, '>h')]
vectors = list(zip(pos[::2], pos[1::2]))
return {'vectors': vectors, 'color': value}
def _unpack_packet_vector(self, code, in_sym_block):
num_bytes = self._buffer.read_int(2, 'big', signed=True)
if code == 10:
value = self._buffer.read_int(2, 'big', signed=True)
num_bytes -= 2
else:
value = None
scale = self.pos_scale(in_sym_block)
pos = [p * scale for p in self._buffer.read_binary(num_bytes // 2, '>h')]
vectors = list(zip(pos[::4], pos[1::4], pos[2::4], pos[3::4]))
return {'vectors': vectors, 'color': value}
def _unpack_packet_contour_color(self, code, in_sym_block):
# Check for color value indicator
assert self._buffer.read_int(2, 'big', signed=False) == 0x0002
# Read and return value (level) of contour
return {'color': self._buffer.read_int(2, 'big', signed=False)}
def _unpack_packet_linked_contour(self, code, in_sym_block):
# Check for initial point indicator
assert self._buffer.read_int(2, 'big', signed=False) == 0x8000
scale = self.pos_scale(in_sym_block)
startx = self._buffer.read_int(2, 'big', signed=True) * scale
starty = self._buffer.read_int(2, 'big', signed=True) * scale
vectors = [(startx, starty)]
num_bytes = self._buffer.read_int(2, 'big', signed=False)
pos = [b * scale for b in self._buffer.read_binary(num_bytes // 2, '>h')]
vectors.extend(zip(pos[::2], pos[1::2]))
return {'vectors': vectors}
def _unpack_packet_wind_barbs(self, code, in_sym_block):
# Figure out how much to read
num_bytes = self._buffer.read_int(2, 'big', signed=True)
packet_data_start = self._buffer.set_mark()
ret = defaultdict(list)
# Read while we have data, then return
while self._buffer.offset_from(packet_data_start) < num_bytes:
ret['color'].append(self._buffer.read_int(2, 'big', signed=True))
ret['x'].append(self._buffer.read_int(2, 'big', signed=True)
* self.pos_scale(in_sym_block))
ret['y'].append(self._buffer.read_int(2, 'big', signed=True)
* self.pos_scale(in_sym_block))
ret['direc'].append(self._buffer.read_int(2, 'big', signed=True))
ret['speed'].append(self._buffer.read_int(2, 'big', signed=True))
return ret
def _unpack_packet_generic(self, code, in_sym_block):
# Reserved HW
assert self._buffer.read_int(2, 'big', signed=True) == 0
# Read number of bytes (2 HW) and return
num_bytes = self._buffer.read_int(4, 'big', signed=True)
hunk = self._buffer.read(num_bytes)
xdrparser = Level3XDRParser(hunk)
return xdrparser(code)
def _unpack_packet_trend_times(self, code, in_sym_block):
self._buffer.read_int(2, 'big', signed=True) # number of bytes, not needed to process
return {'times': self._read_trends()}
def _unpack_packet_cell_trend(self, code, in_sym_block):
code_map = ['Cell Top', 'Cell Base', 'Max Reflectivity Height',
'Probability of Hail', 'Probability of Severe Hail',
'Cell-based VIL', 'Maximum Reflectivity',
'Centroid Height']
code_scales = [100, 100, 100, 1, 1, 1, 1, 100]
num_bytes = self._buffer.read_int(2, 'big', signed=True)
packet_data_start = self._buffer.set_mark()
cell_id = ''.join(self._buffer.read_ascii(2))
x = self._buffer.read_int(2, 'big', signed=True) * self.pos_scale(in_sym_block)
y = self._buffer.read_int(2, 'big', signed=True) * self.pos_scale(in_sym_block)
ret = {'id': cell_id, 'x': x, 'y': y}
while self._buffer.offset_from(packet_data_start) < num_bytes:
code = self._buffer.read_int(2, 'big', signed=True)
try:
ind = code - 1
key = code_map[ind]
scale = code_scales[ind]
except IndexError:
log.warning('%s: Unsupported trend code %d/%x.', self.filename, code, code)
key = 'Unknown'
scale = 1
vals = self._read_trends()
if code in (1, 2):
ret[f'{key} Limited'] = [bool(v > 700) for v in vals]
vals = [v - 1000 if v > 700 else v for v in vals]
ret[key] = [v * scale for v in vals]
return ret
def _read_trends(self):
num_vols, latest = self._buffer.read(2)
vals = [self._buffer.read_int(2, 'big', signed=True) for _ in range(num_vols)]
# Wrap the circular buffer so that latest is last
return vals[latest:] + vals[:latest]
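# Sketch of the wrap above (values illustrative): with latest=2 and
# vals=[a, b, c, d, e], vals[2:] + vals[:2] reorders the circular buffer to
# [c, d, e, a, b] so the most recent volume ends up last.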
packet_map = {1: _unpack_packet_uniform_text,
2: _unpack_packet_special_text_symbol,
3: _unpack_packet_special_graphic_symbol,
4: _unpack_packet_wind_barbs,
6: _unpack_packet_linked_vector,
8: _unpack_packet_uniform_text,
# 9: _unpack_packet_linked_vector,
10: _unpack_packet_vector,
11: _unpack_packet_special_graphic_symbol,
12: _unpack_packet_special_graphic_symbol,
13: _unpack_packet_special_graphic_symbol,
14: _unpack_packet_special_graphic_symbol,
15: _unpack_packet_special_graphic_symbol,
16: _unpack_packet_digital_radial,
17: _unpack_packet_digital_precipitation,
18: _unpack_packet_digital_precipitation,
19: _unpack_packet_special_graphic_symbol,
20: _unpack_packet_special_graphic_symbol,
21: _unpack_packet_cell_trend,
22: _unpack_packet_trend_times,
23: _unpack_packet_scit,
24: _unpack_packet_scit,
25: _unpack_packet_special_graphic_symbol,
26: _unpack_packet_special_graphic_symbol,
28: _unpack_packet_generic,
29: _unpack_packet_generic,
0x0802: _unpack_packet_contour_color,
0x0E03: _unpack_packet_linked_contour,
0xaf1f: _unpack_packet_radial_data,
0xba07: _unpack_packet_raster_data}
class Level3XDRParser(Unpacker):
"""Handle XDR-formatted Level 3 NEXRAD products."""
def __call__(self, code):
"""Perform the actual unpacking."""
xdr = OrderedDict()
if code == 28:
xdr.update(self._unpack_prod_desc())
else:
log.warning('XDR: code %d not implemented', code)
# Check that we got it all
self.done()
return xdr
def unpack_string(self):
"""Unpack the internal data as a string."""
return Unpacker.unpack_string(self).decode('ascii')
def _unpack_prod_desc(self):
xdr = OrderedDict()
# NOTE: The ICD (262001U) incorrectly lists op-mode, vcp, el_num, and
# spare as int*2. Changing to int*4 makes things parse correctly.
xdr['name'] = self.unpack_string()
xdr['description'] = self.unpack_string()
xdr['code'] = self.unpack_int()
xdr['type'] = self.unpack_int()
xdr['prod_time'] = self.unpack_uint()
xdr['radar_name'] = self.unpack_string()
xdr['latitude'] = self.unpack_float()
xdr['longitude'] = self.unpack_float()
xdr['height'] = self.unpack_float()
xdr['vol_time'] = self.unpack_uint()
xdr['el_time'] = self.unpack_uint()
xdr['el_angle'] = self.unpack_float()
xdr['vol_num'] = self.unpack_int()
xdr['op_mode'] = self.unpack_int()
xdr['vcp_num'] = self.unpack_int()
xdr['el_num'] = self.unpack_int()
xdr['compression'] = self.unpack_int()
xdr['uncompressed_size'] = self.unpack_int()
xdr['parameters'] = self._unpack_parameters()
xdr['components'] = self._unpack_components()
return xdr
def _unpack_parameters(self):
num = self.unpack_int()
# ICD documents a "pointer" here, that seems to be garbage. Just read
# and use the number, starting the list immediately.
self.unpack_int()
if num == 0:
return None
ret = []
for i in range(num):
ret.append((self.unpack_string(), self.unpack_string()))
if i < num - 1:
self.unpack_int() # Another pointer for the 'list' ?
if num == 1:
ret = ret[0]
return ret
def _unpack_components(self):
num = self.unpack_int()
# ICD documents a "pointer" here, that seems to be garbage. Just read
# and use the number, starting the list immediately.
self.unpack_int()
ret = []
for i in range(num):
try:
code = self.unpack_int()
ret.append(self._component_lookup[code](self))
if i < num - 1:
self.unpack_int() # Another pointer for the 'list' ?
except KeyError:
log.warning('Unknown XDR Component: %d', code)
break
if num == 1:
ret = ret[0]
return ret
radial_fmt = namedtuple('RadialComponent', ['description', 'gate_width',
'first_gate', 'parameters',
'radials'])
radial_data_fmt = namedtuple('RadialData', ['azimuth', 'elevation', 'width',
'num_bins', 'attributes',
'data'])
def _unpack_radial(self):
ret = self.radial_fmt(description=self.unpack_string(),
gate_width=self.unpack_float(),
first_gate=self.unpack_float(),
parameters=self._unpack_parameters(),
radials=None)
num_rads = self.unpack_int()
rads = []
for _ in range(num_rads):
# ICD is wrong, says num_bins is float, should be int
rads.append(self.radial_data_fmt(azimuth=self.unpack_float(),
elevation=self.unpack_float(),
width=self.unpack_float(),
num_bins=self.unpack_int(),
attributes=self.unpack_string(),
data=self.unpack_array(self.unpack_int)))
return ret._replace(radials=rads)
text_fmt = namedtuple('TextComponent', ['parameters', 'text'])
def _unpack_text(self):
return self.text_fmt(parameters=self._unpack_parameters(),
text=self.unpack_string())
_component_lookup = {1: _unpack_radial, 4: _unpack_text}
@exporter.export
def is_precip_mode(vcp_num):
r"""Determine if the NEXRAD radar is operating in precipitation mode.
Parameters
----------
vcp_num : int
The NEXRAD volume coverage pattern (VCP) number
Returns
-------
bool
True if the VCP corresponds to precipitation mode, False otherwise
"""
return vcp_num // 10 != 3
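# Hedged usage sketch (VCP numbers from common NEXRAD practice): clear-air
# patterns live in the 30s, so is_precip_mode(31) and is_precip_mode(32) return
# False, while precipitation patterns such as is_precip_mode(12) or
# is_precip_mode(212) return True.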
| 48.482334
| 95
| 0.492568
|
0082ee9e6f7f9553ccaac67cc20d5954b0b2297d
| 45,127
|
py
|
Python
|
verticapy/stats/math.py
|
vertica/vertica_ml_python
|
9e82dba94afe8447bfa2492f343af6669128e2fb
|
[
"Apache-2.0"
] | 7
|
2018-05-10T08:16:31.000Z
|
2018-05-15T00:59:26.000Z
|
verticapy/stats/math.py
|
vertica/vertica_ml_python
|
9e82dba94afe8447bfa2492f343af6669128e2fb
|
[
"Apache-2.0"
] | 1
|
2018-05-15T00:15:35.000Z
|
2018-05-15T13:40:19.000Z
|
verticapy/stats/math.py
|
vertica/vertica_ml_python
|
9e82dba94afe8447bfa2492f343af6669128e2fb
|
[
"Apache-2.0"
] | null | null | null |
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage of Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# VerticaPy Modules
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.stats.tools import *
#
# Variables
# ---#
pi = str_sql("PI()")
e = str_sql("EXP(1)")
tau = str_sql("2 * PI()")
inf = str_sql("'inf'::float")
nan = str_sql("'nan'::float")
# String Distance & Soundex
# ---#
def edit_distance(
expr1, expr2,
):
"""
---------------------------------------------------------------------------
Calculates and returns the Levenshtein distance between the two strings.
Parameters
----------
expr1: object
Expression.
expr2: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
version(condition=[10, 1, 0])
expr1 = format_magic(expr1)
expr2 = format_magic(expr2)
return str_sql("EDIT_DISTANCE({}, {})".format(expr1, expr2), "int")
levenshtein = edit_distance
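# Hedged usage sketch (``vdf`` and its "name" column are illustrative, not part
# of this module): the wrapper only builds an EDIT_DISTANCE(..., 'Badr')
# expression as a str_sql object; nothing is executed against the database.
# >>> dist = edit_distance(vdf["name"], "Badr")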
# ---#
def soundex(expr):
"""
---------------------------------------------------------------------------
Returns the Soundex encoding of a VARCHAR string as a four-character string.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
version(condition=[10, 1, 0])
expr = format_magic(expr)
return str_sql("SOUNDEX({})".format(expr), "varchar")
# ---#
def soundex_matches(
expr1, expr2,
):
"""
---------------------------------------------------------------------------
Generates and compares Soundex encodings of two strings, and returns a count
of the matching characters (ranging from 0 for no match to 4 for an exact
match).
Parameters
----------
expr1: object
Expression.
expr2: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
version(condition=[10, 1, 0])
expr1 = format_magic(expr1)
expr2 = format_magic(expr2)
return str_sql("SOUNDEX_MATCHES({}, {})".format(expr1, expr2), "int")
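# Hedged usage sketch (column name is illustrative): the resulting integer
# expression ranges from 0 (no Soundex match) to 4 (all four characters match).
# >>> score = soundex_matches(vdf["last_name"], "Smith")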
# Regular Expressions
# ---#
def regexp_count(
expr, pattern, position: int = 1,
):
"""
---------------------------------------------------------------------------
Returns the number of times a regular expression matches a string.
Parameters
----------
expr: object
Expression.
pattern: object
The regular expression to search for within the string.
position: int, optional
The number of characters from the start of the string where the function
should start searching for matches.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql("REGEXP_COUNT({}, {}, {})".format(expr, pattern, position), "int")
# ---#
def regexp_ilike(expr, pattern):
"""
---------------------------------------------------------------------------
Returns true if the string contains a match for the regular expression.
Parameters
----------
expr: object
Expression.
pattern: object
A string containing the regular expression to match against the string.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql("REGEXP_ILIKE({}, {})".format(expr, pattern))
# ---#
def regexp_instr(
expr, pattern, position: int = 1, occurrence: int = 1, return_position: int = 0
):
"""
---------------------------------------------------------------------------
Returns the starting or ending position in a string where a regular
expression matches.
Parameters
----------
expr: object
Expression.
pattern: object
The regular expression to search for within the string.
position: int, optional
The number of characters from the start of the string where the function
should start searching for matches.
occurrence: int, optional
Controls which occurrence of a pattern match in the string to return.
return_position: int, optional
Sets the position within the string to return.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql(
"REGEXP_INSTR({}, {}, {}, {}, {})".format(
expr, pattern, position, occurrence, return_position
)
)
# ---#
def regexp_like(expr, pattern):
"""
---------------------------------------------------------------------------
Returns true if the string matches the regular expression.
Parameters
----------
expr: object
Expression.
pattern: object
A string containing the regular expression to match against the string.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql("REGEXP_LIKE({}, {})".format(expr, pattern))
# ---#
def regexp_replace(expr, target, replacement, position: int = 1, occurrence: int = 1):
"""
---------------------------------------------------------------------------
Replaces all occurrences of a substring that match a regular expression
with another substring.
Parameters
----------
expr: object
Expression.
target: object
The regular expression to search for within the string.
replacement: object
The string used to replace matched substrings.
position: int, optional
The number of characters from the start of the string where the function
should start searching for matches.
occurrence: int, optional
Controls which occurrence of a pattern match in the string to replace.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
target = format_magic(target)
replacement = format_magic(replacement)
return str_sql(
"REGEXP_REPLACE({}, {}, {}, {}, {})".format(
expr, target, replacement, position, occurrence
)
)
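# Hedged usage sketch (the "comment" column is illustrative, not part of this
# module): builds a REGEXP_REPLACE(...) expression that masks runs of digits.
# >>> masked = regexp_replace(vdf["comment"], "[0-9]+", "#")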
# ---#
def regexp_substr(expr, pattern, position: int = 1, occurrence: int = 1):
"""
---------------------------------------------------------------------------
Returns the substring that matches a regular expression within a string.
Parameters
----------
expr: object
Expression.
pattern: object
The regular expression to find a substring to extract.
position: int, optional
The number of characters from the start of the string where the function
should start searching for matches.
occurrence: int, optional
Controls which occurrence of a pattern match in the string to return.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
pattern = format_magic(pattern)
return str_sql(
"REGEXP_SUBSTR({}, {}, {}, {})".format(expr, pattern, position, occurrence)
)
# String Functions
# ---#
def length(expr):
"""
---------------------------------------------------------------------------
Returns the length of a string.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LENGTH({})".format(expr), "int")
# ---#
def lower(expr):
"""
---------------------------------------------------------------------------
Returns a VARCHAR value containing the argument converted to
lowercase letters.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LOWER({})".format(expr), "text")
# ---#
def substr(expr, position: int, extent: int = None):
"""
---------------------------------------------------------------------------
Returns a VARCHAR or VARBINARY value representing a substring of a specified
string.
Parameters
----------
expr: object
Expression.
position: int
Starting position of the substring.
extent: int, optional
Length of the substring to extract.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
if extent:
position = "{}, {}".format(position, extent)
return str_sql("SUBSTR({}, {})".format(expr, position), "text")
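# Hedged usage sketch (the "code" column is illustrative): SUBSTR positions are
# 1-based in Vertica, so the call below extracts characters 2 through 4.
# >>> part = substr(vdf["code"], 2, 3)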
# ---#
def upper(expr):
"""
---------------------------------------------------------------------------
Returns a VARCHAR value containing the argument converted to uppercase
letters.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("UPPER({})".format(expr), "text")
# Aggregate & Analytical functions
# ---#
def apply(func: str, *args, **kwargs):
"""
---------------------------------------------------------------------------
Applies any Vertica function on the input expressions.
Please check out the Vertica documentation to see the available functions:
https://www.vertica.com/docs/10.0.x/HTML/Content/Authoring/
SQLReferenceManual/Functions/SQLFunctions.htm?tocpath=
SQL%20Reference%20Manual|SQL%20Functions|_____0
Parameters
----------
func : str
Vertica function. For geospatial functions, you can write the function name
without the ST_ or STV_ prefix.
args : object, optional
Expressions.
kwargs: object, optional
Optional parameter expressions (passed as USING PARAMETERS).
Returns
-------
str_sql
SQL expression.
"""
ST_f = [
"Area",
"AsBinary",
"Boundary",
"Buffer",
"Centroid",
"Contains",
"ConvexHull",
"Crosses",
"Difference",
"Disjoint",
"Distance",
"Envelope",
"Equals",
"GeographyFromText",
"GeographyFromWKB",
"GeoHash",
"GeometryN",
"GeometryType",
"GeomFromGeoHash",
"GeomFromText",
"GeomFromWKB",
"Intersection",
"Intersects",
"IsEmpty",
"IsSimple",
"IsValid",
"Length",
"NumGeometries",
"NumPoints",
"Overlaps",
"PointFromGeoHash",
"PointN",
"Relate",
"SRID",
"SymDifference",
"Touches",
"Transform",
"Union",
"Within",
"X",
"XMax",
"XMin",
"YMax",
"YMin",
"Y",
]
STV_f = [
"AsGeoJSON",
"Create_Index",
"Describe_Index",
"Drop_Index",
"DWithin",
"Export2Shapefile",
"Extent",
"ForceLHR",
"Geography",
"GeographyPoint",
"Geometry",
"GeometryPoint",
"GetExportShapefileDirectory",
"Intersect",
"IsValidReason",
"LineStringPoint",
"MemSize",
"NN",
"PolygonPoint",
"Reverse",
"Rename_Index",
"Refresh_Index",
"SetExportShapefileDirectory",
"ShpSource",
"ShpParser",
"ShpCreateTable",
]
ST_f_lower = [elem.lower() for elem in ST_f]
STV_f_lower = [elem.lower() for elem in STV_f]
if func.lower() in ST_f_lower:
func = "ST_" + func
elif func.lower() in STV_f_lower:
func = "STV_" + func
if len(args) > 0:
expr = ", ".join([str(format_magic(elem)) for elem in args])
else:
expr = ""
if len(kwargs) > 0:
param_expr = ", ".join(
[str((elem + " = ") + str(format_magic(kwargs[elem]))) for elem in kwargs]
)
else:
param_expr = ""
if param_expr:
param_expr = " USING PARAMETERS " + param_expr
return str_sql("{}({}{})".format(func.upper(), expr, param_expr))
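# Usage sketch (illustrative comment, not part of the original source):
# assuming col is a str_sql column expression, apply() just assembles the SQL
# string, upper-casing the function name, adding the ST_/STV_ prefix for the
# geospatial names listed above, and turning keyword arguments into a
# USING PARAMETERS clause, e.g.:
#   apply("zeroifnull", col)          -> ZEROIFNULL(<col>)
#   apply("buffer", col, 1)           -> ST_BUFFER(<col>, 1)
#   apply("approximate_percentile", col, percentile=0.5)
#       -> APPROXIMATE_PERCENTILE(<col> USING PARAMETERS percentile = 0.5)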
# ---#
def avg(expr):
"""
---------------------------------------------------------------------------
Computes the average (arithmetic mean) of an expression over a group of rows.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("AVG({})".format(expr), "float")
mean = avg
# ---#
def bool_and(expr):
"""
---------------------------------------------------------------------------
Processes Boolean values and returns a Boolean value result. If all input
values are true, BOOL_AND returns True. Otherwise it returns False.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("BOOL_AND({})".format(expr), "int")
# ---#
def bool_or(expr):
"""
---------------------------------------------------------------------------
Processes Boolean values and returns a Boolean value result. If at least one
input value is true, BOOL_OR returns True. Otherwise, it returns False.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("BOOL_OR({})".format(expr), "int")
# ---#
def bool_xor(expr):
"""
---------------------------------------------------------------------------
Processes Boolean values and returns a Boolean value result. If exactly one
input value is true, BOOL_XOR returns True. Otherwise, it returns False.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("BOOL_XOR({})".format(expr), "int")
# ---#
def conditional_change_event(expr):
"""
---------------------------------------------------------------------------
Assigns an event window number to each row, starting from 0, and increments
by 1 when the result of evaluating the argument expression on the current
row differs from that on the previous row.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("CONDITIONAL_CHANGE_EVENT({})".format(expr), "int")
# ---#
def conditional_true_event(expr):
"""
---------------------------------------------------------------------------
Assigns an event window number to each row, starting from 0, and increments
the number by 1 when the result of the boolean argument expression evaluates
true.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("CONDITIONAL_TRUE_EVENT({})".format(expr), "int")
# ---#
def count(expr):
"""
---------------------------------------------------------------------------
Returns as a BIGINT the number of rows in each group where the expression is
not NULL.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("COUNT({})".format(expr), "int")
# ---#
def lag(expr, offset: int = 1):
"""
---------------------------------------------------------------------------
Returns the value of the input expression at the given offset before the
current row within a window.
Parameters
----------
expr: object
Expression.
offset: int
Number of rows before the current row from which to retrieve the value.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LAG({}, {})".format(expr, offset))
# ---#
def lead(expr, offset: int = 1):
"""
---------------------------------------------------------------------------
Returns values from the row after the current row within a window, letting
you access more than one row in a table at the same time.
Parameters
----------
expr: object
Expression.
offset: int
Number of rows after the current row from which to retrieve the value.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LEAD({}, {})".format(expr, offset))
# ---#
def max(expr):
"""
---------------------------------------------------------------------------
Returns the greatest value of an expression over a group of rows.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("MAX({})".format(expr), "float")
# ---#
def median(expr):
"""
---------------------------------------------------------------------------
Computes the approximate median of an expression over a group of rows.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("APPROXIMATE_MEDIAN({})".format(expr), "float")
# ---#
def min(expr):
"""
---------------------------------------------------------------------------
Returns the smallest value of an expression over a group of rows.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("MIN({})".format(expr), "float")
# ---#
def nth_value(expr, row_number: int):
"""
---------------------------------------------------------------------------
Returns the value evaluated at the row that is the nth row of the window
(counting from 1).
Parameters
----------
expr: object
Expression.
row_number: int
Specifies the row to evaluate.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("NTH_VALUE({}, {})".format(expr, row_number), "int")
# ---#
def quantile(expr, number: float):
"""
---------------------------------------------------------------------------
Computes the approximate percentile of an expression over a group of rows.
Parameters
----------
expr: object
Expression.
number: float
Percentile value, which must be a FLOAT constant ranging from 0 to 1
(inclusive).
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql(
"APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = {})".format(
expr, number
),
"float",
)
# ---#
def rank():
"""
---------------------------------------------------------------------------
Within each window partition, ranks all rows in the query results set
according to the order specified by the window's ORDER BY clause.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("RANK()", "int")
# ---#
def row_number():
"""
---------------------------------------------------------------------------
Assigns a sequence of unique numbers, starting from 1, to each row in a
window partition.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("ROW_NUMBER()", "int")
# ---#
def std(expr):
"""
---------------------------------------------------------------------------
Evaluates the statistical sample standard deviation for each member of the
group.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("STDDEV({})".format(expr), "float")
stddev = std
# ---#
def sum(expr):
"""
---------------------------------------------------------------------------
Computes the sum of an expression over a group of rows.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("SUM({})".format(expr), "float")
# ---#
def var(expr):
"""
---------------------------------------------------------------------------
Evaluates the sample variance for each row of the group.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("VARIANCE({})".format(expr), "float")
variance = var
# Mathematical Functions
# ---#
def abs(expr):
"""
---------------------------------------------------------------------------
Absolute Value.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ABS({})".format(expr), "float")
# ---#
def acos(expr):
"""
---------------------------------------------------------------------------
Trigonometric Inverse Cosine.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ACOS({})".format(expr), "float")
# ---#
def asin(expr):
"""
---------------------------------------------------------------------------
Trigonometric Inverse Sine.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ASIN({})".format(expr), "float")
# ---#
def atan(expr):
"""
---------------------------------------------------------------------------
Trigonometric Inverse Tangent.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ATAN({})".format(expr), "float")
# ---#
def case_when(*argv):
"""
---------------------------------------------------------------------------
Returns the conditional statement of the input arguments.
Parameters
----------
argv: object
Infinite Number of Expressions.
The expression generated will look like:
even: CASE ... WHEN argv[2 * i] THEN argv[2 * i + 1] ... END
odd : CASE ... WHEN argv[2 * i] THEN argv[2 * i + 1] ... ELSE argv[n] END
Returns
-------
str_sql
SQL expression.
"""
n = len(argv)
if n < 2:
raise ParameterError(
"The number of arguments of the 'case_when' function must be strictly greater than 1."
)
category = get_category_from_python_type(argv[1])
i = 0
expr = "CASE"
while i < n:
if i + 1 == n:
expr += " ELSE " + str(format_magic(argv[i]))
i += 1
else:
expr += (
" WHEN "
+ str(format_magic(argv[i]))
+ " THEN "
+ str(format_magic(argv[i + 1]))
)
i += 2
expr += " END"
return str_sql(expr, category)
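# Illustrative note (comment added, not in the original source): with the loop
# above, case_when(cond1, val1, cond2, val2, default) builds
#   CASE WHEN <cond1> THEN <val1> WHEN <cond2> THEN <val2> ELSE <default> END
# while an even number of arguments simply omits the ELSE branch; the result
# category is inferred from the Python type of the first THEN value (argv[1]).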
# ---#
def cbrt(expr):
"""
---------------------------------------------------------------------------
Cube Root.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("CBRT({})".format(expr), "float")
# ---#
def ceil(expr):
"""
---------------------------------------------------------------------------
Ceiling Function.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("CEIL({})".format(expr), "float")
# ---#
def coalesce(expr, *argv):
"""
---------------------------------------------------------------------------
Returns the value of the first non-null expression in the list.
Parameters
----------
expr: object
Expression.
argv: object
Infinite Number of Expressions.
Returns
-------
str_sql
SQL expression.
"""
category = get_category_from_python_type(expr)
expr = [format_magic(expr)]
for arg in argv:
expr += [format_magic(arg)]
expr = ", ".join([str(elem) for elem in expr])
return str_sql("COALESCE({})".format(expr), category)
# ---#
def comb(n: int, k: int):
"""
---------------------------------------------------------------------------
Number of ways to choose k items from n items.
Parameters
----------
n : int
items to choose from.
k : int
items to choose.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("({})! / (({})! * ({} - {})!)".format(n, k, n, k), "float")
# ---#
def cos(expr):
"""
---------------------------------------------------------------------------
Trigonometric Cosine.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("COS({})".format(expr), "float")
# ---#
def cosh(expr):
"""
---------------------------------------------------------------------------
Hyperbolic Cosine.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("COSH({})".format(expr), "float")
# ---#
def cot(expr):
"""
---------------------------------------------------------------------------
Trigonometric Cotangent.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("COT({})".format(expr), "float")
# ---#
def date(expr):
"""
---------------------------------------------------------------------------
Converts the input value to a DATE data type.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("DATE({})".format(expr), "date")
# ---#
def day(expr):
"""
---------------------------------------------------------------------------
Returns as an integer the day of the month from the input expression.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("DAY({})".format(expr), "float")
# ---#
def dayofweek(expr):
"""
---------------------------------------------------------------------------
Returns the day of the week as an integer, where Sunday is day 1.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("DAYOFWEEK({})".format(expr), "float")
# ---#
def dayofyear(expr):
"""
---------------------------------------------------------------------------
Returns the day of the year as an integer, where January 1 is day 1.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("DAYOFYEAR({})".format(expr), "float")
# ---#
def decode(expr, *argv):
"""
---------------------------------------------------------------------------
Compares expression to each search value one by one.
Parameters
----------
expr: object
Expression.
argv: object
Infinite Number of Expressions.
The expression generated will look like:
even: CASE ... WHEN expr = argv[2 * i] THEN argv[2 * i + 1] ... END
odd : CASE ... WHEN expr = argv[2 * i] THEN argv[2 * i + 1] ... ELSE argv[n] END
Returns
-------
str_sql
SQL expression.
"""
n = len(argv)
if n < 2:
raise ParameterError(
"The number of arguments of the 'decode' function must be greater than 3."
)
category = get_category_from_python_type(argv[1])
expr = (
"DECODE("
+ str(format_magic(expr))
+ ", "
+ ", ".join([str(format_magic(elem)) for elem in argv])
+ ")"
)
return str_sql(expr, category)
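# Illustrative note (comment added, not in the original source): the generated
# expression for decode(expr, s1, r1, s2, r2, default) is
#   DECODE(<expr>, <s1>, <r1>, <s2>, <r2>, <default>)
# i.e. Vertica returns the result paired with the first matching search value,
# or the trailing default when an odd number of search/result arguments is
# supplied; the category is inferred from the first result value (argv[1]).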
# ---#
def degrees(expr):
"""
---------------------------------------------------------------------------
Converts Radians to Degrees.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("DEGREES({})".format(expr), "float")
# ---#
def distance(
lat0: float, lon0: float, lat1: float, lon1: float, radius: float = 6371.009
):
"""
---------------------------------------------------------------------------
Returns the distance (in kilometers) between two points.
Parameters
----------
lat0: float
Starting point latitude.
lon0: float
Starting point longitude.
lat1: float
Ending point latitude.
lon1: float
Ending point longitude.
radius: float
Specifies the radius of the curvature of the earth at the midpoint
between the starting and ending points.
Returns
-------
str_sql
SQL expression.
"""
return str_sql(
"DISTANCE({}, {}, {}, {}, {})".format(lat0, lon0, lat1, lon1, radius), "float"
)
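# Example (illustrative): distance(48.8566, 2.3522, 51.5074, -0.1278) builds
#   DISTANCE(48.8566, 2.3522, 51.5074, -0.1278, 6371.009)
# the great-circle distance between Paris and London (roughly 344 km).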
# ---#
def exp(expr):
"""
---------------------------------------------------------------------------
Exponential.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("EXP({})".format(expr), "float")
# ---#
def extract(expr, field: str):
"""
---------------------------------------------------------------------------
Extracts a sub-field such as year or hour from a date/time expression.
Parameters
----------
expr: object
Expression.
field: str
The field to extract. It must be one of the following:
CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH / HOUR / ISODOW / ISOWEEK /
ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS / MINUTE / MONTH / QUARTER /
SECOND / TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("DATE_PART('{}', {})".format(field, expr), "int")
# ---#
def factorial(expr):
"""
---------------------------------------------------------------------------
Factorial.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("({})!".format(expr), "int")
# ---#
def floor(expr):
"""
---------------------------------------------------------------------------
Floor Function.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("FLOOR({})".format(expr), "int")
# ---#
def gamma(expr):
"""
---------------------------------------------------------------------------
Gamma Function.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("({} - 1)!".format(expr), "float")
# ---#
def getdate():
"""
---------------------------------------------------------------------------
Returns the current statement's start date and time as a TIMESTAMP value.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("GETDATE()", "date")
# ---#
def getutcdate():
"""
---------------------------------------------------------------------------
Returns the current statement's start date and time at TIME ZONE 'UTC'
as a TIMESTAMP value.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("GETUTCDATE()", "date")
# ---#
def hash(*argv):
"""
---------------------------------------------------------------------------
Calculates a hash value over the function arguments.
Parameters
----------
argv: object
Infinite Number of Expressions.
Returns
-------
str_sql
SQL expression.
"""
expr = []
for arg in argv:
expr += [format_magic(arg)]
expr = ", ".join([str(elem) for elem in expr])
return str_sql("HASH({})".format(expr), "float")
# ---#
def hour(expr):
"""
---------------------------------------------------------------------------
Returns the hour portion of the specified date as an integer, where 0 is
00:00 to 00:59.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("HOUR({})".format(expr), "int")
# ---#
def interval(expr):
"""
---------------------------------------------------------------------------
Converts the input value to an INTERVAL data type.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("({})::interval".format(expr), "interval")
# ---#
def isfinite(expr):
"""
---------------------------------------------------------------------------
Returns True if the expression is finite.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr, cat = format_magic(expr, True)
return str_sql(
"(({}) = ({})) AND (ABS({}) < 'inf'::float)".format(expr, expr, expr), cat
)
# ---#
def isinf(expr):
"""
---------------------------------------------------------------------------
Returns True if the expression is infinite.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ABS({}) = 'inf'::float".format(expr), "float")
# ---#
def isnan(expr):
"""
---------------------------------------------------------------------------
Returns True if the expression is NaN.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr, cat = format_magic(expr, True)
return str_sql("(({}) != ({}))".format(expr, expr), cat)
# ---#
def lgamma(expr):
"""
---------------------------------------------------------------------------
Natural Logarithm of the expression Gamma.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LN(({} - 1)!)".format(expr), "float")
# ---#
def ln(expr):
"""
---------------------------------------------------------------------------
Natural Logarithm.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LN({})".format(expr), "float")
# ---#
def log(expr, base: int = 10):
"""
---------------------------------------------------------------------------
Logarithm.
Parameters
----------
expr: object
Expression.
base: int
Specifies the base.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("LOG({}, {})".format(base, expr), "float")
# ---#
def minute(expr):
"""
---------------------------------------------------------------------------
Returns the minute portion of the specified date as an integer.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("MINUTE({})".format(expr), "int")
# ---#
def microsecond(expr):
"""
---------------------------------------------------------------------------
Returns the microsecond portion of the specified date as an integer.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("MICROSECOND({})".format(expr), "int")
# ---#
def month(expr):
"""
---------------------------------------------------------------------------
Returns the month portion of the specified date as an integer.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("MONTH({})".format(expr), "int")
# ---#
def nullifzero(expr):
"""
---------------------------------------------------------------------------
Evaluates to NULL if the value in the expression is 0.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr, cat = format_magic(expr, True)
return str_sql("NULLIFZERO({})".format(expr), cat)
# ---#
def overlaps(start0, end0, start1, end1):
"""
---------------------------------------------------------------------------
Evaluates two time periods and returns true when they overlap, false
otherwise.
Parameters
----------
start0: object
DATE, TIME, or TIMESTAMP/TIMESTAMPTZ value that specifies the beginning
of a time period.
end0: object
DATE, TIME, or TIMESTAMP/TIMESTAMPTZ value that specifies the end of a
time period.
start1: object
DATE, TIME, or TIMESTAMP/TIMESTAMPTZ value that specifies the beginning
of a time period.
end1: object
DATE, TIME, or TIMESTAMP/TIMESTAMPTZ value that specifies the end of a
time period.
Returns
-------
str_sql
SQL expression.
"""
return str_sql(
"({}, {}) OVERLAPS ({}, {})".format(
format_magic(start0),
format_magic(end0),
format_magic(start1),
format_magic(end1),
),
"int",
)
# ---#
def quarter(expr):
"""
---------------------------------------------------------------------------
Returns calendar quarter of the specified date as an integer, where the
January-March quarter is 1.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("QUARTER({})".format(expr), "int")
# ---#
def radians(expr):
"""
---------------------------------------------------------------------------
Converts Degrees to Radians.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("RADIANS({})".format(expr), "float")
# ---#
def random():
"""
---------------------------------------------------------------------------
Returns a Random Number.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("RANDOM()", "float")
# ---#
def randomint(n: int):
"""
---------------------------------------------------------------------------
Returns a Random Number from 0 through n - 1.
Parameters
----------
n: int
Integer Value.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("RANDOMINT({})".format(n), "int")
# ---#
def round(expr, places: int = 0):
"""
---------------------------------------------------------------------------
Rounds the expression.
Parameters
----------
expr: object
Expression.
places: int
Number of decimal places to round the expression to.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ROUND({}, {})".format(expr, places), "float")
# ---#
def round_date(expr, precision: str = "DD"):
"""
---------------------------------------------------------------------------
Rounds the specified date or time.
Parameters
----------
expr: object
Expression.
precision: str, optional
A string constant that specifies precision for the rounded value,
one of the following:
Century: CC | SCC
Year: SYYY | YYYY | YEAR | YYY | YY | Y
ISO Year: IYYY | IYY | IY | I
Quarter: Q
Month: MONTH | MON | MM | RM
Same weekday as first day of year: WW
Same weekday as first day of ISO year: IW
Same weekday as first day of month: W
Day (default): DDD | DD | J
First weekday: DAY | DY | D
Hour: HH | HH12 | HH24
Minute: MI
Second: SS
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("ROUND({}, '{}')".format(expr, precision), "date")
# ---#
def second(expr):
"""
---------------------------------------------------------------------------
Returns the seconds portion of the specified date as an integer.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("SECOND({})".format(expr), "int")
# ---#
def seeded_random(random_state: int):
"""
---------------------------------------------------------------------------
Returns a Seeded Random Number using the input random state.
Parameters
----------
random_state: int
Integer used to seed the randomness.
Returns
-------
str_sql
SQL expression.
"""
return str_sql("SEEDED_RANDOM({})".format(random_state), "float")
# ---#
def sign(expr):
"""
---------------------------------------------------------------------------
Sign of the expression.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("SIGN({})".format(expr), "int")
# ---#
def sin(expr):
"""
---------------------------------------------------------------------------
Trigonometric Sine.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("SIN({})".format(expr), "float")
# ---#
def sinh(expr):
"""
---------------------------------------------------------------------------
Hyperbolic Sine.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("SINH({})".format(expr), "float")
# ---#
def sqrt(expr):
"""
---------------------------------------------------------------------------
Arithmetic Square Root.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("SQRT({})".format(expr), "float")
# ---#
def tan(expr):
"""
---------------------------------------------------------------------------
Trigonometric Tangent.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("TAN({})".format(expr), "float")
# ---#
def tanh(expr):
"""
---------------------------------------------------------------------------
Hyperbolic Tangent.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("TANH({})".format(expr), "float")
# ---#
def timestamp(expr):
"""
---------------------------------------------------------------------------
Converts the input value to a TIMESTAMP data type.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("({})::timestamp".format(expr), "date")
# ---#
def trunc(expr, places: int = 0):
"""
---------------------------------------------------------------------------
Truncates the expression.
Parameters
----------
expr: object
Expression.
places: int
Number of decimal places to keep when truncating the expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("TRUNC({}, {})".format(expr, places), "float")
# ---#
def week(expr):
"""
---------------------------------------------------------------------------
Returns the week of the year for the specified date as an integer, where the
first week begins on the first Sunday on or preceding January 1.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("WEEK({})".format(expr), "int")
# ---#
def year(expr):
"""
---------------------------------------------------------------------------
Returns an integer that represents the year portion of the specified date.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr = format_magic(expr)
return str_sql("YEAR({})".format(expr), "int")
# ---#
def zeroifnull(expr):
"""
---------------------------------------------------------------------------
Evaluates to 0 if the expression is NULL.
Parameters
----------
expr: object
Expression.
Returns
-------
str_sql
SQL expression.
"""
expr, cat = format_magic(expr, True)
return str_sql("ZEROIFNULL({})".format(expr), cat)
| 20.400995
| 98
| 0.507767
|
b81268d0b948acbd3dcfcf935257f7d44cdb2ab1
| 1,763
|
py
|
Python
|
tools/ci_c.py
|
csirosat/kubos
|
fa5db37d5ecef479ebbbe4e69fbf765566eccdee
|
[
"Apache-2.0"
] | 266
|
2018-01-10T04:41:19.000Z
|
2022-03-29T19:59:43.000Z
|
tools/ci_c.py
|
csirosat/kubos
|
fa5db37d5ecef479ebbbe4e69fbf765566eccdee
|
[
"Apache-2.0"
] | 444
|
2018-01-09T18:08:26.000Z
|
2022-03-30T02:00:04.000Z
|
tools/ci_c.py
|
csirosat/kubos
|
fa5db37d5ecef479ebbbe4e69fbf765566eccdee
|
[
"Apache-2.0"
] | 60
|
2018-02-03T02:19:12.000Z
|
2022-03-29T19:13:16.000Z
|
#!/usr/bin/env python3
import subprocess
import os
import shutil
projects = [
"./cmocka",
"./ccan/json",
"./examples/adc-thermistor",
"./examples/kubos-linux-uarttx",
"./examples/kubos-linux-uartrx",
"./examples/kubos-linux-example",
"./examples/kubos-linux-tcprx",
"./examples/rust-c-service/extern-lib",
"./examples/kubos-linux-tcptx",
"./test/integration/linux/iobc-supervisor-test",
"./test/integration/linux/isis-imtq",
"./test/integration/linux/bme280-spi",
"./test/integration/linux/isis-trxvu/radio-test",
"./test/integration/linux/isis-ants",
"./test/integration/linux/nanopower-p31u",
"./test/integration/linux/lsm303dlhc-i2c",
"./test/integration/linux/hello-world",
"./hal/kubos-hal",
"./apis/gomspace-p31u-api",
"./apis/isis-ants-api",
"./apis/isis-imtq-api",
"./apis/isis-trxvu-api",
"./apis/isis-iobc-supervisor",
]
def clean(dir):
build_dir = "build"
shutil.rmtree(build_dir, ignore_errors=True)
def build(dir):
build_dir = "build"
cmake_dir = "../{}".format(dir)
os.mkdir(build_dir)
subprocess.run(["cmake", cmake_dir], cwd=build_dir, check=True)
subprocess.run(["make"], cwd=build_dir, check=True)
def run_test(dir):
build_dir = "build"
os.environ["CTEST_OUTPUT_ON_FAILURE"] = "1"
subprocess.run(["make", "test"], cwd=build_dir, check=True)
def test(dir):
test_dir = "{}/test".format(dir)
if os.path.isdir(test_dir):
clean(test_dir)
build(test_dir)
run_test(test_dir)
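# Flow sketch (comment added for clarity): for each project directory the
# script removes any stale ./build folder, configures an out-of-source build
# with "cmake ../<project>" from inside build/, runs make, and, when the
# project has a test/ subdirectory, repeats the same steps there and runs
# "make test" with CTEST_OUTPUT_ON_FAILURE=1.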
def main():
print("Building C projects")
for dir in projects:
print("Testing {}".format(dir))
clean(dir)
build(dir)
test(dir)
if __name__ == '__main__':
main()
| 26.313433
| 67
| 0.631877
|
ed53e2fb8673f501bc76e893b0b4a0b8eec8034b
| 28,744
|
py
|
Python
|
src/models/backbone_kpconv/kpconv_blocks.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | 25
|
2022-03-28T06:26:16.000Z
|
2022-03-30T14:21:24.000Z
|
src/models/backbone_kpconv/kpconv_blocks.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | null | null | null |
src/models/backbone_kpconv/kpconv_blocks.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | 2
|
2022-03-29T09:37:50.000Z
|
2022-03-30T06:26:35.000Z
|
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Define network blocks
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 06/03/2020
#
# Modified by Zi Jian Yew to apply instance normalization on each point cloud
# separately
import logging
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.init import kaiming_uniform_
from .kernels.kernel_points import load_kernels
_logger = logging.getLogger(__name__)
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.avg = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
self.avg = 0
def tic(self):
# using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.avg = self.total_time / self.calls
if average:
return self.avg
else:
return self.diff
def gather(x, idx, method=2):
"""
Implementation of a custom gather operation for a faster backward pass.
:param x: input with shape [N, D_1, ... D_d]
:param idx: indexing with shape [n_1, ..., n_m]
:param method: Choice of the method
:return: x[idx] with shape [n_1, ..., n_m, D_1, ... D_d]
"""
if method == 0:
return x[idx]
elif method == 1:
x = x.unsqueeze(1)
x = x.expand((-1, idx.shape[-1], -1))
idx = idx.unsqueeze(2)
idx = idx.expand((-1, -1, x.shape[-1]))
return x.gather(0, idx)
elif method == 2:
for i, ni in enumerate(idx.size()[1:]):
x = x.unsqueeze(i + 1)
new_s = list(x.size())
new_s[i + 1] = ni
x = x.expand(new_s)
n = len(idx.size())
for i, di in enumerate(x.size()[n:]):
idx = idx.unsqueeze(i + n)
new_s = list(idx.size())
new_s[i + n] = di
idx = idx.expand(new_s)
return x.gather(0, idx)
else:
raise ValueError('Unknown method')
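# Shape sketch (comment added for clarity): for
#   x   of shape [N, D]    (per-point features)
#   idx of shape [M, K]    (neighbor indices into the first axis of x)
# gather(x, idx) returns a tensor of shape [M, K, D], i.e. x[idx]; methods 1
# and 2 expand the indices explicitly so torch.gather can be used, which gives
# the faster backward pass mentioned in the docstring, compared with plain
# advanced indexing (method 0).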
def radius_gaussian(sq_r, sig, eps=1e-9):
"""
Compute a radius gaussian (gaussian of distance)
:param sq_r: input radiuses [dn, ..., d1, d0]
:param sig: extents of gaussians [d1, d0] or [d0] or float
:return: gaussian of sq_r [dn, ..., d1, d0]
"""
return torch.exp(-sq_r / (2 * sig ** 2 + eps))
def closest_pool(x, inds):
"""
Pools features from the closest neighbors. WARNING: this function assumes the neighbors are ordered.
:param x: [n1, d] features matrix
:param inds: [n2, max_num] Only the first column is used for pooling
:return: [n2, d] pooled features matrix
"""
# Add a last row with minimum features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get features for each pooling location [n2, d]
return gather(x, inds[:, 0])
def max_pool(x, inds):
"""
Pools features with the maximum values.
:param x: [n1, d] features matrix
:param inds: [n2, max_num] pooling indices
:return: [n2, d] pooled features matrix
"""
# Add a last row with minimum features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get all features for each pooling location [n2, max_num, d]
pool_features = gather(x, inds)
# Pool the maximum [n2, d]
max_features, _ = torch.max(pool_features, 1)
return max_features
def global_average(x, batch_lengths):
"""
Block performing a global average over batch pooling
:param x: [N, D] input features
:param batch_lengths: [B] list of batch lengths
:return: [B, D] averaged features
"""
# Loop over the clouds of the batch
averaged_features = []
i0 = 0
for b_i, length in enumerate(batch_lengths):
# Average features for each batch cloud
averaged_features.append(torch.mean(x[i0:i0 + length], dim=0))
# Increment for next cloud
i0 += length
# Average features in each batch
return torch.stack(averaged_features)
# ----------------------------------------------------------------------------------------------------------------------
#
# KPConv class
# \******************/
#
class KPConv(nn.Module):
def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius,
fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum',
deformable=False, modulated=False):
"""
Initialize parameters for KPConvDeformable.
:param kernel_size: Number of kernel points.
:param p_dim: dimension of the point space.
:param in_channels: dimension of input features.
:param out_channels: dimension of output features.
:param KP_extent: influence radius of each kernel point.
:param radius: radius used for kernel point init. Even for deformable, use the config.conv_radius
:param fixed_kernel_points: fix position of certain kernel points ('none', 'center' or 'verticals').
:param KP_influence: influence function of the kernel points ('constant', 'linear', 'gaussian').
:param aggregation_mode: choose to sum influences, or only keep the closest ('closest', 'sum').
:param deformable: choose deformable or not
:param modulated: choose if kernel weights are modulated in addition to deformed
"""
super(KPConv, self).__init__()
# Save parameters
self.K = kernel_size
self.p_dim = p_dim
self.in_channels = in_channels
self.out_channels = out_channels
self.radius = radius
self.KP_extent = KP_extent
self.fixed_kernel_points = fixed_kernel_points
self.KP_influence = KP_influence
self.aggregation_mode = aggregation_mode
self.deformable = deformable
self.modulated = modulated
# Running variable containing deformed KP distance to input points. (used in regularization loss)
self.min_d2 = None
self.deformed_KP = None
self.offset_features = None
# Initialize weights
self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
requires_grad=True)
# Initiate weights for offsets
if deformable:
if modulated:
self.offset_dim = (self.p_dim + 1) * self.K
else:
self.offset_dim = self.p_dim * self.K
self.offset_conv = KPConv(self.K,
self.p_dim,
self.in_channels,
self.offset_dim,
KP_extent,
radius,
fixed_kernel_points=fixed_kernel_points,
KP_influence=KP_influence,
aggregation_mode=aggregation_mode)
self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True)
else:
self.offset_dim = None
self.offset_conv = None
self.offset_bias = None
# Reset parameters
self.reset_parameters()
# Initialize kernel points
self.kernel_points = self.init_KP()
return
def reset_parameters(self):
kaiming_uniform_(self.weights, a=math.sqrt(5))
if self.deformable:
nn.init.zeros_(self.offset_bias)
return
def init_KP(self):
"""
Initialize the kernel point positions in a sphere
:return: the tensor of kernel points
"""
# Create one kernel disposition (as numpy array). Choose the KP distance to center thanks to the KP extent
K_points_numpy = load_kernels(self.radius,
self.K,
dimension=self.p_dim,
fixed=self.fixed_kernel_points)
return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
requires_grad=False)
def forward(self, q_pts, s_pts, neighb_inds, x):
###################
# Offset generation
###################
if self.deformable:
# Get offsets with a KPConv that only takes part of the features
self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
if self.modulated:
# Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features[:, :self.p_dim * self.K]
unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)
# Get modulations
modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:])
else:
# Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)
# No modulations
modulations = None
# Rescale offset for this layer
offsets = unscaled_offsets * self.KP_extent
else:
offsets = None
modulations = None
######################
# Deformed convolution
######################
# Add a fake point in the last row for shadow neighbors
s_pts = torch.cat((s_pts, torch.zeros_like(s_pts[:1, :]) + 1e6), 0)
# Get neighbor points [n_points, n_neighbors, dim]
neighbors = s_pts[neighb_inds, :]
# Center every neighborhood
neighbors = neighbors - q_pts.unsqueeze(1)
# Apply offsets to kernel points [n_points, n_kpoints, dim]
if self.deformable:
self.deformed_KP = offsets + self.kernel_points
deformed_K_points = self.deformed_KP.unsqueeze(1)
else:
deformed_K_points = self.kernel_points
# Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
neighbors.unsqueeze_(2)
differences = neighbors - deformed_K_points
# Get the square distances [n_points, n_neighbors, n_kpoints]
sq_distances = torch.sum(differences ** 2, dim=3)
# Optimization by ignoring points outside a deformed KP range
if self.deformable:
# Save distances for loss
self.min_d2, _ = torch.min(sq_distances, dim=1)
# Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)
# New value of max neighbors
new_max_neighb = torch.max(torch.sum(in_range, dim=1))
# For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)
# Gather new neighbor indices [n_points, new_max_neighb]
new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
# Gather new distances to KP [n_points, new_max_neighb, n_kpoints]
neighb_row_inds.unsqueeze_(2)
neighb_row_inds = neighb_row_inds.expand(-1, -1, self.K)
sq_distances = sq_distances.gather(1, neighb_row_inds, sparse_grad=False)
# New shadow neighbors have to point to the last shadow point
new_neighb_inds *= neighb_row_bool
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
else:
new_neighb_inds = neighb_inds
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if self.KP_influence == 'constant':
# Every point gets an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'linear':
# Influence decreases linearly with the distance and reaches zero when d = KP_extent.
all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0)
all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'gaussian':
# Influence is a gaussian of the distance.
sigma = self.KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = torch.transpose(all_weights, 1, 2)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
# In case of closest mode, only the closest KP can influence each point
if self.aggregation_mode == 'closest':
neighbors_1nn = torch.argmin(sq_distances, dim=2)
all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2)
elif self.aggregation_mode != 'sum':
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
# Add a zero feature for shadow neighbors
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
neighb_x = gather(x, new_neighb_inds)
# Apply distance weights [n_points, n_kpoints, in_fdim]
weighted_features = torch.matmul(all_weights, neighb_x)
# Apply modulations
if self.deformable and self.modulated:
weighted_features *= modulations.unsqueeze(2)
# Apply network weights [n_kpoints, n_points, out_fdim]
weighted_features = weighted_features.permute((1, 0, 2))
kernel_outputs = torch.matmul(weighted_features, self.weights)
# Convolution sum [n_points, out_fdim]
# return torch.sum(kernel_outputs, dim=0)
output_features = torch.sum(kernel_outputs, dim=0, keepdim=False)
# normalization term.
neighbor_features_sum = torch.sum(neighb_x, dim=-1)
neighbor_num = torch.sum(torch.gt(neighbor_features_sum, 0.0), dim=-1)
neighbor_num = torch.max(neighbor_num, torch.ones_like(neighbor_num))
output_features = output_features / neighbor_num.unsqueeze(1)
return output_features
def __repr__(self):
return 'KPConv(radius: {:.2f}, extent: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius,
self.KP_extent,
self.in_channels,
self.out_channels)
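# Usage sketch (illustrative comment, not part of the original file): a rigid
# KPConv layer maps features from the support points onto the query points.
# Shapes only, values are dummies:
#   conv = KPConv(kernel_size=15, p_dim=3, in_channels=64, out_channels=128,
#                 KP_extent=0.04, radius=0.05)
#   q_pts       : [n_query, 3]               query point coordinates
#   s_pts       : [n_support, 3]             support point coordinates
#   neighb_inds : [n_query, max_neighbors]   indices into s_pts (shadow
#                 neighbors point to the appended fake point)
#   x           : [n_support, 64]            support features
#   out = conv(q_pts, s_pts, neighb_inds, x)  # -> [n_query, 128]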
# ----------------------------------------------------------------------------------------------------------------------
#
# Complex blocks
# \********************/
#
def block_decider(block_name,
radius,
in_dim,
out_dim,
layer_ind,
config):
if block_name == 'unary':
return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum)
elif block_name == 'unary2':
return UnaryBlock2(in_dim, out_dim)
elif block_name in ['simple',
'simple_deformable',
'simple_invariant',
'simple_equivariant',
'simple_strided',
'simple_deformable_strided',
'simple_invariant_strided',
'simple_equivariant_strided']:
return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name in ['resnetb',
'resnetb_invariant',
'resnetb_equivariant',
'resnetb_deformable',
'resnetb_strided',
'resnetb_deformable_strided',
'resnetb_equivariant_strided',
'resnetb_invariant_strided']:
return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name == 'max_pool' or block_name == 'max_pool_wide':
return MaxPoolBlock(layer_ind)
elif block_name == 'global_average':
return GlobalAverageBlock()
elif block_name == 'nearest_upsample':
return NearestUpsampleBlock(layer_ind)
else:
raise ValueError('Unknown block name in the architecture definition : ' + block_name)
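# Illustrative note (comment added, not in the original source): block_decider
# is typically called while iterating over a config.architecture list of block
# names, e.g.
#   ['simple', 'resnetb', 'resnetb_strided', 'resnetb', 'nearest_upsample',
#    'unary']
# Each name is mapped to one of the modules above; '*_strided' blocks consume
# the pooling neighborhoods and move the cloud one layer down, while
# 'nearest_upsample' moves features back up using the precomputed upsample
# indices.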
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
:param in_dim: dimension of input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
# self.norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum)
self.norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x, stack_lengths):
assert x.shape[0] == stack_lengths.sum()
if self.use_bn:
if isinstance(self.norm, nn.BatchNorm1d):
x = x.unsqueeze(2)
x = x.transpose(0, 2) # (1, C, L)
x = self.norm(x)
x = x.transpose(0, 2)
return x.squeeze()
elif isinstance(self.norm, nn.InstanceNorm1d):
B = len(stack_lengths)
b_start_end = F.pad(torch.cumsum(stack_lengths, dim=0), (1, 0))
x = x.unsqueeze(2)
x = x.transpose(0, 2) # (1, C, L)
x = torch.cat([self.norm(x[:, :, b_start_end[b]:b_start_end[b+1]])
for b in range(B)], dim=2)
x = x.transpose(0, 2)
return x.squeeze()
else:
raise NotImplementedError
else:
return x + self.bias
def __repr__(self):
return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim,
self.bn_momentum,
str(not self.use_bn))
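# Note (comment added for clarity): with use_bn=True this block applies
# nn.InstanceNorm1d to each point cloud of the stacked batch separately,
# using stack_lengths to slice the concatenated [N, C] feature tensor
# (e.g. stack_lengths = [n0, n1] normalizes x[:n0] and x[n0:n0 + n1]
# independently); with use_bn=False it just adds a learnable per-channel bias.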
class UnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
:param in_dim: dimension of input features
:param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, stack_lengths=None):
x = self.mlp(x)
x = self.batch_norm(x, stack_lengths)
if not self.no_relu:
x = self.leaky_relu(x)
return x
def __repr__(self):
return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim,
self.out_dim,
str(self.use_bn),
str(not self.no_relu))
class UnaryBlock2(nn.Module):
"""Just a MLP"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(in_dim, in_dim),
nn.ReLU(),
nn.Linear(in_dim, out_dim)
)
self.in_dim = in_dim
self.out_dim = out_dim
def forward(self, x):
return self.mlp(x)
def __repr__(self):
return 'UnaryBlock2(in_feat: {:d}, out_feat: {:d})'.format(self.in_dim, self.out_dim)
class SimpleBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
"""
Initialize a simple convolution block with its ReLU and BatchNorm.
:param in_dim: dimension of input features
:param out_dim: dimension of output features
:param radius: current radius of convolution
:param config: parameters
"""
super(SimpleBlock, self).__init__()
# get KP_extent from current radius
current_extent = radius * config.KP_extent / config.conv_radius
# Get other parameters
self.bn_momentum = config.batch_norm_momentum
self.use_bn = config.use_batch_norm
self.layer_ind = layer_ind
self.block_name = block_name
self.in_dim = in_dim
self.out_dim = out_dim
# Define the KPConv class
self.KPConv = KPConv(config.num_kernel_points,
config.in_points_dim,
in_dim,
out_dim // 2,
current_extent,
radius,
fixed_kernel_points=config.fixed_kernel_points,
KP_influence=config.KP_influence,
aggregation_mode=config.aggregation_mode,
deformable='deform' in block_name,
modulated=config.modulated)
# Other operations
self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch):
if 'strided' in self.block_name:
q_pts = batch['points'][self.layer_ind + 1]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['pools'][self.layer_ind]
stack_lengths = batch['stack_lengths'][self.layer_ind + 1]
else:
q_pts = batch['points'][self.layer_ind]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['neighbors'][self.layer_ind]
stack_lengths = batch['stack_lengths'][self.layer_ind]
x = self.KPConv(q_pts, s_pts, neighb_inds, x)
return self.leaky_relu(self.batch_norm(x, stack_lengths))
class ResnetBottleneckBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
"""
Initialize a resnet bottleneck block.
:param in_dim: dimension of input features
:param out_dim: dimension of output features
:param radius: current radius of convolution
:param config: parameters
"""
super(ResnetBottleneckBlock, self).__init__()
# get KP_extent from current radius
current_extent = radius * config.KP_extent / config.conv_radius
# Get other parameters
self.bn_momentum = config.batch_norm_momentum
self.use_bn = config.use_batch_norm
self.block_name = block_name
self.layer_ind = layer_ind
self.in_dim = in_dim
self.out_dim = out_dim
# First downscaling mlp
if in_dim != out_dim // 4:
self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum)
else:
self.unary1 = nn.Identity()
# KPConv block
self.KPConv = KPConv(config.num_kernel_points,
config.in_points_dim,
out_dim // 4,
out_dim // 4,
current_extent,
radius,
fixed_kernel_points=config.fixed_kernel_points,
KP_influence=config.KP_influence,
aggregation_mode=config.aggregation_mode,
deformable='deform' in block_name,
modulated=config.modulated)
self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum)
# Second upscaling mlp
self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
# Shortcut optional mlp
if in_dim != out_dim:
self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
else:
self.unary_shortcut = nn.Identity()
# Other operations
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, features, batch):
stack_lengths_pre = batch['stack_lengths'][self.layer_ind]
if 'strided' in self.block_name:
q_pts = batch['points'][self.layer_ind + 1]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['pools'][self.layer_ind]
stack_lengths_post = batch['stack_lengths'][self.layer_ind + 1]
else:
q_pts = batch['points'][self.layer_ind]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['neighbors'][self.layer_ind]
stack_lengths_post = batch['stack_lengths'][self.layer_ind]
# First downscaling mlp
x = self.unary1(features, stack_lengths_pre) if isinstance(self.unary1, UnaryBlock) else \
self.unary1(features)
# Convolution
x = self.KPConv(q_pts, s_pts, neighb_inds, x)
x = self.leaky_relu(self.batch_norm_conv(x, stack_lengths_post))
# Second upscaling mlp
x = self.unary2(x, stack_lengths_post) if isinstance(self.unary2, UnaryBlock) else \
self.unary2(x)
# Shortcut
if 'strided' in self.block_name:
shortcut = max_pool(features, neighb_inds)
else:
shortcut = features
shortcut = self.unary_shortcut(shortcut, stack_lengths_post) if isinstance(self.unary_shortcut, UnaryBlock) else \
self.unary_shortcut(shortcut)
return self.leaky_relu(x + shortcut)
class GlobalAverageBlock(nn.Module):
def __init__(self):
"""
Initialize a global average block (it has no learnable parameters).
"""
super(GlobalAverageBlock, self).__init__()
return
def forward(self, x, batch):
return global_average(x, batch['stack_lengths'][-1])
class NearestUpsampleBlock(nn.Module):
def __init__(self, layer_ind):
"""
Initialize a nearest upsampling block (it has no learnable parameters).
"""
super(NearestUpsampleBlock, self).__init__()
self.layer_ind = layer_ind
return
def forward(self, x, batch):
return closest_pool(x, batch['upsamples'][self.layer_ind - 1])
def __repr__(self):
return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind,
self.layer_ind - 1)
class MaxPoolBlock(nn.Module):
def __init__(self, layer_ind):
"""
Initialize a max pooling block (it has no learnable parameters).
"""
super(MaxPoolBlock, self).__init__()
self.layer_ind = layer_ind
return
def forward(self, x, batch):
return max_pool(x, batch['pools'][self.layer_ind + 1])
| 36.523507
| 122
| 0.566936
|
819c90ed16fff52b1673bc241322f561a26959c4
| 20,796
|
py
|
Python
|
assignment2/cs231n/classifiers/fc_net.py
|
pranav-s/Stanford_CS234_CV_2017
|
9b0536812477dd0ea0e2dc4f063976a2e79148cc
|
[
"MIT"
] | null | null | null |
assignment2/cs231n/classifiers/fc_net.py
|
pranav-s/Stanford_CS234_CV_2017
|
9b0536812477dd0ea0e2dc4f063976a2e79148cc
|
[
"MIT"
] | null | null | null |
assignment2/cs231n/classifiers/fc_net.py
|
pranav-s/Stanford_CS234_CV_2017
|
9b0536812477dd0ea0e2dc4f063976a2e79148cc
|
[
"MIT"
] | null | null | null |
from builtins import range
from builtins import object
import numpy as np
from ..layers import *
from ..layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(
self,
input_dim=3 * 32 * 32,
hidden_dim=100,
num_classes=10,
weight_scale=1e-3,
reg=0.0,
):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian centered at 0.0 with #
# standard deviation equal to weight_scale, and biases should be #
# initialized to zero. All weights and biases should be stored in the #
# dictionary self.params, with first layer weights #
# and biases using the keys 'W1' and 'b1' and second layer #
# weights and biases using the keys 'W2' and 'b2'. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
self.params['b1']=np.zeros(hidden_dim)
self.params['b2']=np.zeros(num_classes)
self.params['W1'] = np.array([[np.random.normal(scale=weight_scale) for i in range(hidden_dim)] for j in range(input_dim)])
self.params['W2'] = np.array([[np.random.normal(scale=weight_scale) for i in range(num_classes)] for j in range(hidden_dim)])
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
layer_1_out, _ = affine_forward(X, self.params['W1'], self.params['b1'])
layer_1_nonlinear, _ = relu_forward(layer_1_out)
scores, _ = affine_forward(layer_1_nonlinear, self.params['W2'], self.params['b2'])
# scores = softmax_forward(layer_2_out)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
loss, grads = 0, {}
if y is None:
return scores
else:
loss, softmax_grads = softmax_loss(scores, y)
loss_regularized = loss + 0.5*self.reg*(np.linalg.norm(self.params['W1'])**2+np.linalg.norm(self.params['W2'])**2)
#print(softmax_grads.shape)
#print(layer_1_nonlinear.shape)
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
hidden_grad, grads['W2'], grads['b2'] = affine_backward(softmax_grads, (layer_1_nonlinear, self.params['W2'], self.params['b2'], self.reg))
hidden_grad_relu = relu_backward(hidden_grad, layer_1_out)
#print('Shape of hidden grad_relu ', hidden_grad_relu.shape)
#print('Shape of X ',X.shape)
grad_input, grads['W1'], grads['b1'] = affine_backward(hidden_grad_relu, (X, self.params['W1'], self.params['b1'], self.reg))
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
return loss_regularized, grads
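# Illustrative usage sketch (an addition, not part of the original assignment
# solution): it exercises the TwoLayerNet defined above on random data. The
# shapes, seed, and hyperparameters below are arbitrary assumptions; in the
# assignment the model is normally trained through the separate Solver object
# mentioned in the class docstring, not called directly like this.
def _example_two_layer_net_usage():
    np.random.seed(0)
    model = TwoLayerNet(input_dim=20, hidden_dim=10, num_classes=5, reg=0.1)
    X = np.random.randn(4, 20)            # a fake minibatch of 4 flattened inputs
    y = np.random.randint(5, size=4)      # fake integer class labels
    scores = model.loss(X)                # test-time forward pass, shape (4, 5)
    loss, grads = model.loss(X, y)        # training pass: scalar loss + gradient dict
    return scores.shape, loss, sorted(grads.keys())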
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch/layer normalization as options. For a network with L layers,
the architecture will be
{affine - [batch/layer norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch/layer normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(
self,
hidden_dims,
input_dim=3 * 32 * 32,
num_classes=10,
dropout=1,
normalization=None,
reg=0.0,
weight_scale=1e-2,
dtype=np.float32,
seed=None,
):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=1 then
the network should not use dropout at all.
- normalization: What type of normalization the network should use. Valid values
are "batchnorm", "layernorm", or None for no normalization (the default).
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deterministic so we can gradient check the
model.
"""
self.normalization = normalization
self.use_dropout = dropout != 1
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution centered at 0 with standard #
# deviation equal to weight_scale. Biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to ones and shift #
# parameters should be initialized to zeros. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
current_layer_index = 1
layer_input = input_dim
for h in hidden_dims:
self.params['b'+str(current_layer_index)] = np.zeros(h)
self.params['W'+str(current_layer_index)] = np.array([[np.random.normal(scale=weight_scale) for i in range(h)] for j in range(layer_input)])
self.params['gamma'+str(current_layer_index)] = np.ones(h)
self.params['beta'+str(current_layer_index)] = np.zeros(h)
layer_input = h
current_layer_index+=1
self.params['b'+str(current_layer_index)] = np.zeros(num_classes)
self.params['W'+str(current_layer_index)] = np.array([[np.random.normal(scale=weight_scale) for i in range(num_classes)] for j in range(layer_input)])
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {"mode": "train", "p": dropout}
if seed is not None:
self.dropout_param["seed"] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.normalization == "batchnorm":
self.bn_params = [{"mode": "train"} for i in range(self.num_layers - 1)]
if self.normalization == "layernorm":
self.bn_params = [{} for i in range(self.num_layers - 1)]
# Cast all parameters to the correct datatype
# for k, v in self.params.items():
# self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = "test" if y is None else "train"
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.use_dropout:
self.dropout_param["mode"] = mode
if self.normalization == "batchnorm":
for bn_param in self.bn_params:
bn_param["mode"] = mode
scores = None
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
layer_input = X
layer_nonlinear_dict = {}
layer_dict = {}
layer_dropout_dict = {}
for layer in range(1, self.num_layers):
if self.normalization is not None:
layer_out, cache = affine_norm_relu_forward(layer_input, self.params['W'+str(layer)], self.params['b'+str(layer)],
self.normalization, self.params['gamma'+str(layer)], self.params['beta'+str(layer)], self.bn_params[layer-1])
fc_cache, norm_cache, relu_cache = cache
fc_cache = (*fc_cache, self.reg)
cache = (fc_cache, norm_cache, relu_cache)
layer_dict[layer] = cache
else:
layer_out, cache = affine_relu_forward(layer_input, self.params['W'+str(layer)], self.params['b'+str(layer)])
fc_cache, relu_cache = cache
fc_cache = (*fc_cache, self.reg)
cache = (fc_cache, relu_cache)
layer_dict[layer] = cache
if self.use_dropout:
layer_dropout, cache = dropout_forward(layer_out, self.dropout_param)
layer_dropout_dict[layer] = cache
layer_input = layer_dropout
else:
layer_input = layer_out
scores, _ = affine_forward(layer_input, self.params['W'+str(self.num_layers)], self.params['b'+str(self.num_layers)])
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == "test":
return scores
loss, grads = 0.0, {}
loss, softmax_grads = softmax_loss(scores, y)
loss_regularized = loss + 0.5*self.reg*(np.sum([np.linalg.norm(self.params['W'+str(layer)])**2 for layer in range(1, self.num_layers+1)]))
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch/layer normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
hidden_grad = softmax_grads
#print(hidden_grad.shape)
hidden_grad, grads['W'+str(self.num_layers)], grads['b'+str(self.num_layers)] = affine_backward(hidden_grad, (layer_input, self.params['W'+str(self.num_layers)], self.params['b'+str(self.num_layers)], self.reg))
#print(hidden_grad.shape)
for layer in range(self.num_layers, 1, -1):
if self.use_dropout:
hidden_grad = dropout_backward(hidden_grad, layer_dropout_dict[layer-1])
if self.normalization is not None:
hidden_grad, grads['W'+str(layer-1)], grads['b'+str(layer-1)], grads['gamma'+str(layer-1)], grads['beta'+str(layer-1)] = affine_norm_relu_backward(hidden_grad, self.normalization, layer_dict[layer-1])
else:
hidden_grad, grads['W'+str(layer-1)], grads['b'+str(layer-1)] = affine_relu_backward(hidden_grad, layer_dict[layer-1])
#print(hidden_grad.shape)
# hidden_grad = hidden_grad_input
# hidden_grad_non_linear = relu_backward(hidden_grad, layer_dict[layer-1])
# grad_input, grads['W1'], grads['b1'] = affine_backward(hidden_grad_non_linear, (X, self.params['W1'], self.params['b1'], self.reg))
#print('Shape of hidden grad_relu ', hidden_grad_relu.shape)
#print('Shape of X ',X.shape)
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
return loss_regularized, grads
def affine_norm_relu_forward(x, w, b, normalization, gamma, beta, bn_param):
"""
Convenience layer that performs an affine transform followed by
batch/layer normalization and a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
- normalization: Either 'batchnorm' or 'layernorm'
- gamma, beta, bn_param: Parameters passed to the normalization layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
# print(x.shape, w.shape, b.shape)
a, fc_cache = affine_forward(x, w, b)
if normalization=='batchnorm':
norm_out, norm_cache = batchnorm_forward(a, gamma, beta, bn_param)
elif normalization=='layernorm':
norm_out, norm_cache = layernorm_forward(a, gamma, beta, bn_param)
out, relu_cache = relu_forward(norm_out)
cache = (fc_cache, norm_cache, relu_cache)
return out, cache
def affine_norm_relu_backward(dout, normalization, cache):
"""
Backward pass for the affine-norm-relu convenience layer
"""
fc_cache, norm_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
if normalization == 'batchnorm':
dx, dgamma, dbeta = batchnorm_backward_alt(da, norm_cache)
if normalization == 'layernorm':
dx, dgamma, dbeta = layernorm_backward(da, norm_cache)
dx, dw, db = affine_backward(dx, fc_cache)
return dx, dw, db, dgamma, dbeta
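# Illustrative usage sketch (an addition, not part of the original file): builds
# a small FullyConnectedNet with batch normalization and dropout and runs one
# loss/gradient evaluation on random data. All sizes and hyperparameters are
# assumptions, and the sketch relies on the layer functions imported from
# ..layers above being implemented.
def _example_fully_connected_net_usage():
    np.random.seed(0)
    model = FullyConnectedNet(
        hidden_dims=[30, 20],
        input_dim=15,
        num_classes=10,
        dropout=0.5,
        normalization="batchnorm",
        reg=0.01,
        seed=0,
    )
    X = np.random.randn(8, 15)
    y = np.random.randint(10, size=8)
    loss, grads = model.loss(X, y)   # training-mode forward + backward pass
    scores = model.loss(X)           # test-mode forward pass only
    return loss, scores.shape, sorted(grads.keys())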
| 51.475248
| 219
| 0.533276
|
3352ae4c6a028b1e86d38a9164d338229b57feca
| 3,709
|
py
|
Python
|
chexpert_labeler/stages/aggregate.py
|
stmharry/interpretable-report-gen
|
c4518b613526f76ef84e1dc58b4939b315e80cd9
|
[
"MIT"
] | 2
|
2020-12-16T07:43:57.000Z
|
2021-07-09T01:15:30.000Z
|
chexpert_labeler/stages/aggregate.py
|
stmharry/interpretable-report-gen
|
c4518b613526f76ef84e1dc58b4939b315e80cd9
|
[
"MIT"
] | 3
|
2021-06-08T21:34:27.000Z
|
2021-09-08T02:02:28.000Z
|
chexpert_labeler/stages/aggregate.py
|
stmharry/interpretable-report-gen
|
c4518b613526f76ef84e1dc58b4939b315e80cd9
|
[
"MIT"
] | null | null | null |
"""Define mention aggregator class."""
import numpy as np
from tqdm import tqdm
from chexpert_labeler.constants import *
class Aggregator(object):
"""Aggregate mentions of observations from radiology reports."""
def __init__(self, categories, verbose=False):
self.categories = categories
self.verbose = verbose
def dict_to_vec(self, d):
"""
Convert a dictionary of the form
{cardiomegaly: [1],
opacity: [u, 1],
fracture: [0]}
into a vector of the form
[np.nan, np.nan, 1, u, np.nan, ..., 0, np.nan]
"""
vec = []
for category in self.categories:
# There was a mention of the category.
if category in d:
label_list = d[category]
# Only one label, no conflicts.
if len(label_list) == 1:
vec.append(label_list[0])
# Multiple labels.
else:
# Case 1. There is negated and uncertain.
if NEGATIVE in label_list and UNCERTAIN in label_list:
vec.append(UNCERTAIN)
# Case 2. There is negated and positive.
elif NEGATIVE in label_list and POSITIVE in label_list:
vec.append(POSITIVE)
# Case 3. There is uncertain and positive.
elif UNCERTAIN in label_list and POSITIVE in label_list:
vec.append(POSITIVE)
# Case 4. All labels are the same.
else:
vec.append(label_list[0])
# No mention of the category
else:
vec.append(np.nan)
return vec
def aggregate(self, collection):
labels = []
documents = collection.documents
if self.verbose:
print("Aggregating mentions...")
documents = tqdm(documents)
for document in documents:
label_dict = {}
impression_passage = document.passages[0]
no_finding = True
for annotation in impression_passage.annotations:
category = annotation.infons[OBSERVATION]
if NEGATION in annotation.infons:
label = NEGATIVE
elif UNCERTAINTY in annotation.infons:
label = UNCERTAIN
else:
label = POSITIVE
# If at least one non-support category has an uncertain or
# positive label, there was a finding
if (category != SUPPORT_DEVICES and
label in [UNCERTAIN, POSITIVE]):
no_finding = False
# Don't add any labels for No Finding
if category == NO_FINDING:
continue
# add exception for 'chf' and 'heart failure'
if ((label in [UNCERTAIN, POSITIVE]) and
(annotation.text == 'chf' or
annotation.text == 'heart failure')):
if CARDIOMEGALY not in label_dict:
label_dict[CARDIOMEGALY] = [UNCERTAIN]
else:
label_dict[CARDIOMEGALY].append(UNCERTAIN)
if category not in label_dict:
label_dict[category] = [label]
else:
label_dict[category].append(label)
if no_finding:
label_dict[NO_FINDING] = [POSITIVE]
label_vec = self.dict_to_vec(label_dict)
labels.append(label_vec)
return np.array(labels)
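# Illustrative sketch (an addition, not part of the original file): shows how
# dict_to_vec resolves conflicting mention labels into a single vector. The
# category strings are arbitrary examples; only the POSITIVE / NEGATIVE /
# UNCERTAIN constants imported from chexpert_labeler.constants above are real.
def _example_dict_to_vec():
    aggregator = Aggregator(categories=["Cardiomegaly", "Lung Opacity", "Fracture"])
    mentions = {
        "Cardiomegaly": [POSITIVE],              # single label, kept as-is
        "Lung Opacity": [NEGATIVE, UNCERTAIN],   # negated + uncertain -> uncertain
    }
    # "Fracture" is never mentioned, so its slot becomes np.nan.
    return aggregator.dict_to_vec(mentions)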
| 34.342593
| 76
| 0.512807
|
4a0bc894f8c9e6aae7aeb0032ff235c583419c4b
| 6,048
|
py
|
Python
|
zaifapi/exchange_api/trade.py
|
techbureau/zaifapi
|
5b7db7d6abdc76b4e911a74457140b3faf0b7317
|
[
"MIT"
] | 62
|
2017-05-10T12:24:48.000Z
|
2021-03-17T07:03:29.000Z
|
zaifapi/exchange_api/trade.py
|
techbureau/zaifapi
|
5b7db7d6abdc76b4e911a74457140b3faf0b7317
|
[
"MIT"
] | 15
|
2017-06-12T07:12:14.000Z
|
2020-01-30T13:28:53.000Z
|
zaifapi/exchange_api/trade.py
|
techbureau/zaifapi
|
5b7db7d6abdc76b4e911a74457140b3faf0b7317
|
[
"MIT"
] | 19
|
2017-08-23T20:47:14.000Z
|
2018-11-21T10:01:06.000Z
|
import time
import hmac
import hashlib
from decimal import Decimal
from datetime import datetime
from abc import ABCMeta, abstractmethod
from typing import Optional
from urllib.parse import urlencode
from zaifapi.api_common import ApiUrl, get_response, get_api_url, method_name
from zaifapi.api_error import ZaifApiError, ZaifApiNonceError
from . import ZaifExchangeApi
class _ZaifTradeApiBase(ZaifExchangeApi, metaclass=ABCMeta):
@abstractmethod
def _get_header(self, params):
raise NotImplementedError()
@staticmethod
def _get_nonce():
now = datetime.now()
nonce = str(int(time.mktime(now.timetuple())))
microseconds = "{0:06d}".format(now.microsecond)
return Decimal(nonce + "." + microseconds)
def _execute_api(self, func_name, schema_keys=None, params=None):
schema_keys = schema_keys or []
params = params or {}
params = self._params_pre_processing(schema_keys, params, func_name)
header = self._get_header(params)
url = self._url.get_absolute_url()
res = get_response(url, params, header)
if res["success"] == 0:
if res["error"].startswith("nonce"):
raise ZaifApiNonceError(res["error"])
raise ZaifApiError(res["error"])
return res["return"]
def _params_pre_processing(self, keys, params, func_name):
params = self._validator.params_pre_processing(keys, params)
params["method"] = func_name
params["nonce"] = self._get_nonce()
return urlencode(params)
def _make_signature(key, secret, params):
signature = hmac.new(bytearray(secret.encode("utf-8")), digestmod=hashlib.sha512)
signature.update(params.encode("utf-8"))
return {"key": key, "sign": signature.hexdigest()}
class ZaifTradeApi(_ZaifTradeApiBase):
def __init__(self, key, secret, api_url=None):
super().__init__(get_api_url(api_url, "tapi"))
self._key = key
self._secret = secret
def _get_header(self, params):
return _make_signature(self._key, self._secret, params)
def get_info(self):
return self._execute_api(method_name())
def get_info2(self):
return self._execute_api(method_name())
def get_personal_info(self):
return self._execute_api(method_name())
def get_id_info(self):
return self._execute_api(method_name())
def trade_history(self, **kwargs):
schema_keys = [
"from_num",
"count",
"from_id",
"end_id",
"order",
"since",
"end",
"currency_pair",
"is_token",
]
return self._execute_api(method_name(), schema_keys, kwargs)
def active_orders(self, **kwargs):
schema_keys = ["currency_pair", "is_token", "is_token_both"]
return self._execute_api(method_name(), schema_keys, kwargs)
def _inner_history_api(self, func_name, kwargs):
schema_keys = [
"currency",
"from_num",
"count",
"from_id",
"end_id",
"order",
"since",
"end",
"is_token",
]
return self._execute_api(func_name, schema_keys, kwargs)
def withdraw_history(self, **kwargs):
return self._inner_history_api(method_name(), kwargs)
def deposit_history(self, **kwargs):
return self._inner_history_api(method_name(), kwargs)
def withdraw(self, **kwargs):
schema_keys = ["currency", "address", "message", "amount", "opt_fee"]
return self._execute_api(method_name(), schema_keys, kwargs)
def cancel_order(self, **kwargs):
schema_keys = ["order_id", "is_token", "currency_pair"]
return self._execute_api(method_name(), schema_keys, kwargs)
def trade(self, **kwargs):
schema_keys = ["currency_pair", "action", "price", "amount", "limit", "comment"]
return self._execute_api(method_name(), schema_keys, kwargs)
class ZaifLeverageTradeApi(_ZaifTradeApiBase):
def __init__(self, key, secret, api_url=None):
api_url = get_api_url(api_url, "tlapi")
super().__init__(api_url)
self._key = key
self._secret = secret
def _get_header(self, params):
return _make_signature(self._key, self._secret, params)
def get_positions(self, **kwargs):
schema_keys = [
"type",
"group_id",
"from_num",
"count",
"from_id",
"end_id",
"order",
"since",
"end",
"currency_pair",
]
return self._execute_api(method_name(), schema_keys, kwargs)
def position_history(self, **kwargs):
schema_keys = ["type", "group_id", "leverage_id"]
return self._execute_api(method_name(), schema_keys, kwargs)
def active_positions(self, **kwargs):
schema_keys = ["type", "group_id", "currency_pair"]
return self._execute_api(method_name(), schema_keys, kwargs)
def create_position(self, **kwargs):
schema_keys = [
"type",
"group_id",
"currency_pair",
"action",
"price",
"amount",
"leverage",
"limit",
"stop",
]
return self._execute_api(method_name(), schema_keys, kwargs)
def change_position(self, **kwargs):
schema_keys = ["type", "group_id", "leverage_id", "price", "limit", "stop"]
return self._execute_api(method_name(), schema_keys, kwargs)
def cancel_position(self, **kwargs):
schema_keys = ["type", "group_id", "leverage_id"]
return self._execute_api(method_name(), schema_keys, kwargs)
class ZaifTokenTradeApi(ZaifTradeApi):
def __init__(self, token: str, api_url: Optional[ApiUrl] = None):
self._token = token
super().__init__(None, None, api_url)
def _get_header(self, params):
return {"token": self._token}
| 31.831579
| 88
| 0.616237
|
6ca3c66bb40ead0935edfe9c434651b6246cbc39
| 2,008
|
py
|
Python
|
tests/sources/test_matlab_functions.py
|
SherlockSheep/webots
|
13bb7f13f15cd1a003b64368b1fd8783732e175e
|
[
"Apache-2.0"
] | 1
|
2020-09-28T08:34:32.000Z
|
2020-09-28T08:34:32.000Z
|
tests/sources/test_matlab_functions.py
|
congleetea/webots
|
f7bf0cb385842478c5635e29db8e25689d3d0c65
|
[
"Apache-2.0"
] | null | null | null |
tests/sources/test_matlab_functions.py
|
congleetea/webots
|
f7bf0cb385842478c5635e29db8e25689d3d0c65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that all the required Matlab functions are defined."""
import unittest
import os
class TestMatlabFunctions(unittest.TestCase):
"""Unit test for checking that all the required Matlab functions are defined."""
def setUp(self):
"""Get all the required function."""
skippedLines = [
'wbr',
'microphone',
'remote_control',
'robot',
'wb_device_get_type',
'wb_node_get_name',
'lookup_table_size',
'EXPORTS'
]
self.functions = []
filename = os.environ['WEBOTS_HOME'] + '/src/Controller/Controller.def'
self.assertTrue(
os.path.isfile(filename),
msg='Missing "%s" file.' % filename
)
with open(filename) as file:
for line in file:
if not any(skippedLine in line for skippedLine in skippedLines):
self.functions.append(line.replace('\n', ''))
def test_matlab_function_exists(self):
"""Test that the fucntion file exists."""
for function in self.functions:
filename = os.environ['WEBOTS_HOME'] + '/lib/controller/matlab/' + function + '.m'
self.assertTrue(
os.path.isfile(filename),
msg='Missing "%s" file.' % filename
)
if __name__ == '__main__':
unittest.main()
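# Example invocation (an addition, not part of the original file): the test reads
# the Controller.def export list relative to WEBOTS_HOME, so that variable must
# point at a Webots source checkout, e.g.
#   WEBOTS_HOME=/path/to/webots python tests/sources/test_matlab_functions.py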
| 33.466667
| 94
| 0.621016
|
eaeed368105f93a7198ca2ffd0b883e20fc903e3
| 8,135
|
py
|
Python
|
problems/dist_mnist_problem.py
|
javieryu/nn_distributed_training
|
ebbae5317efc514437374fc2f698fbcec9debab4
|
[
"MIT"
] | 3
|
2021-10-14T19:53:19.000Z
|
2022-02-22T01:14:44.000Z
|
problems/dist_mnist_problem.py
|
javieryu/nn_distributed_training
|
ebbae5317efc514437374fc2f698fbcec9debab4
|
[
"MIT"
] | null | null | null |
problems/dist_mnist_problem.py
|
javieryu/nn_distributed_training
|
ebbae5317efc514437374fc2f698fbcec9debab4
|
[
"MIT"
] | null | null | null |
import os
import copy
import torch
class DistMNISTProblem:
"""An object that manages the various datastructures for a distributed
optimization problem on the MNIST classification problem. In addition,
it computes, stores, and writes out relevant metrics.
Author: Javier Yu, javieryu@stanford.edu, July 20, 2021
"""
def __init__(
self,
graph,
base_model,
base_loss,
train_sets,
val_set,
device,
conf,
):
self.graph = graph
self.base_loss = base_loss
self.train_sets = train_sets
self.val_set = val_set
self.conf = conf
# Extract some useful info
self.N = graph.number_of_nodes()
self.n = torch.nn.utils.parameters_to_vector(
base_model.parameters()
).shape[0]
self.device = device
# Copy the base_model for each node
self.models = {i: copy.deepcopy(base_model) for i in range(self.N)}
for i in range(self.N):
self.models[i] = self.models[i].to(self.device)
# Create train loaders and iterators with specified batch size
self.train_loaders = {}
self.train_iters = {}
for i in range(self.N):
self.train_loaders[i] = torch.utils.data.DataLoader(
self.train_sets[i],
batch_size=self.conf["train_batch_size"],
shuffle=True,
)
self.train_iters[i] = iter(self.train_loaders[i])
self.val_loader = torch.utils.data.DataLoader(
self.val_set, batch_size=self.conf["val_batch_size"]
)
# Initialize lists for metrics with names
self.metrics = {met_name: [] for met_name in self.conf["metrics"]}
self.epoch_tracker = torch.zeros(self.N)
self.forward_cnt = 0
def local_batch_loss(self, i):
"""Forward pass on a batch data for model at node i,
and if it's node_id = 0 then increment a metric that
counts the number of forward passes. Also increment an
epoch tracker for each node when the iterator resets.
Finally compute loss based on base_loss function and return.
Note: if for whatever reason there isn't a node zero then
this method's metric increment step will fail.
Args:
i (int): Node id
Returns:
(torch.Tensor): Loss of node i's model on a batch of
local data.
"""
try:
x, y = next(self.train_iters[i])
except StopIteration:
self.epoch_tracker[i] += 1
self.train_iters[i] = iter(self.train_loaders[i])
x, y = next(self.train_iters[i])
if i == 0:
# Count the number of forward passes that have been performed
# because this is symmetric across nodes we only have to do
# this for node 0, and it will be consistent with all nodes.
self.forward_cnt += self.conf["train_batch_size"]
yh = self.models[i].forward(x.to(self.device))
return self.base_loss(yh, y.to(self.device))
def update_graph(self):
"""Placeholder because the graph is not dynamic in this problem."""
pass
def save_metrics(self, output_dir):
"""Save current metrics lists to a PT file."""
file_name = self.conf["problem_name"] + "_results.pt"
file_path = os.path.join(output_dir, file_name)
torch.save(self.metrics, file_path)
return
def validate(self, i):
"""Compute the loss and accuracy of a
single node's model on the validation set.
Args:
i (int): Node id
"""
with torch.no_grad():
loss = 0.0
correct = 0
correct_list = []
for x, y in self.val_loader:
x, y = x.to(self.device), y.to(self.device)
yh = self.models[i].forward(x)
loss += self.base_loss(yh, y).item()
pred = yh.argmax(dim=1, keepdim=True)
correct_vec = pred.eq(y.view_as(pred))
correct += correct_vec.sum().item()
correct_list.append(correct_vec)
avg_loss = loss / len(self.val_loader.dataset)
acc = correct / len(self.val_loader.dataset)
return avg_loss, acc, torch.vstack(correct_list)
def evaluate_metrics(self, at_end=False):
"""Evaluate models, and then append values to the metric lists."""
# Compute validation loss and accuracy (if you do one you might
# as well do the other)
if (
"validation_loss" in self.metrics
or "top1_accuracy" in self.metrics
or "valdiation_as_vector" in self.metrics
):
avg_losses = torch.zeros(self.N)
accs = torch.zeros(self.N)
valid_vecs = {}
for i in range(self.N):
avg_losses[i], accs[i], valid_vecs[i] = self.validate(i)
evalprint = "| "
for met_name in self.conf["metrics"]:
if met_name == "consensus_error":
# The average distance from a single node to all
# of the other nodes in the problem
with torch.no_grad():
all_params = [
torch.nn.utils.parameters_to_vector(
self.models[i].parameters()
)
for i in range(self.N)
]
# Stack all of the parameters into rows
th_stack = torch.stack(all_params)
# Normalize the stack
th_stack = torch.nn.functional.normalize(th_stack, dim=1)
# Compute row-wise distances
distances_all = torch.cdist(th_stack, th_stack)
th_mean = torch.mean(th_stack, dim=0).reshape(1, -1)
distances_mean = torch.cdist(th_stack, th_mean)
# append metrics and generate print string
self.metrics[met_name].append((distances_all, distances_mean))
evalprint += "Consensus: {:.4f} - {:.4f} | ".format(
torch.amin(distances_mean).item(),
torch.amax(distances_mean).item(),
)
elif met_name == "validation_loss":
# Average node loss on the validation dataset
self.metrics[met_name].append(avg_losses)
evalprint += "Val Loss: {:.4f} - {:.4f} | ".format(
torch.amin(avg_losses).item(),
torch.amax(avg_losses).item(),
)
elif met_name == "top1_accuracy":
# Top1 accuracy of nodes on the validation dataset
self.metrics[met_name].append(accs)
evalprint += "Top1: {:.2f} - {:.2f} |".format(
torch.amin(accs).item(), torch.amax(accs).item()
)
elif met_name == "forward_pass_count":
# Number of forward passes performed by each node
self.metrics[met_name].append(self.forward_cnt)
evalprint += "Num Forward: {} | ".format(self.forward_cnt)
elif met_name == "current_epoch":
# Current epoch of each node (only different if the datasets at
# each node are not the same size)
self.metrics[met_name].append(
copy.deepcopy(self.epoch_tracker)
)
evalprint += "Ep Range: {} - {} | ".format(
int(torch.amin(self.epoch_tracker).item()),
int(torch.amax(self.epoch_tracker).item()),
)
elif met_name == "validation_as_vector":
# Returns the validations prediction correctness vector
# Nothing to print from here
self.metrics[met_name].append(valid_vecs)
else:
raise NameError("Unknown metric.")
print(evalprint)
return
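# Illustrative sketch (an addition, not part of the original file): a bare-bones
# decentralized SGD loop driven by a constructed DistMNISTProblem. The learning
# rate, step count, and output directory are assumptions, and a real experiment
# would also mix parameters with graph neighbours after each local step, which
# is omitted here.
def _example_training_loop(problem, num_steps=100, lr=1e-2):
    optimizers = {
        i: torch.optim.SGD(problem.models[i].parameters(), lr=lr)
        for i in range(problem.N)
    }
    for step in range(num_steps):
        for i in range(problem.N):
            optimizers[i].zero_grad()
            loss = problem.local_batch_loss(i)   # forward pass on node i's local batch
            loss.backward()
            optimizers[i].step()
        if step % 10 == 0:
            problem.evaluate_metrics()
    os.makedirs("./results", exist_ok=True)
    problem.save_metrics("./results")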
| 38.372642
| 79
| 0.55083
|
fc0f89b83d59281a84ab6dea909d143a4622a2ae
| 23,212
|
py
|
Python
|
python/ray/tune/ray_trial_executor.py
|
wingman-ai/ray
|
23e1ccc1ea5d7e5789628d3eb85ff6bc8d0a4359
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/ray_trial_executor.py
|
wingman-ai/ray
|
23e1ccc1ea5d7e5789628d3eb85ff6bc8d0a4359
|
[
"Apache-2.0"
] | 4
|
2019-03-04T13:03:24.000Z
|
2019-06-06T11:25:07.000Z
|
python/ray/tune/ray_trial_executor.py
|
wingman-ai/ray
|
23e1ccc1ea5d7e5789628d3eb85ff6bc8d0a4359
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import random
import time
import traceback
import ray
from ray.tune.error import AbortTrialExecution
from ray.tune.logger import NoopLogger
from ray.tune.trial import Trial, Resources, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.tune.util import warn_if_slow
logger = logging.getLogger(__name__)
RESOURCE_REFRESH_PERIOD = 0.5 # Refresh resources every 500 ms
BOTTLENECK_WARN_PERIOD_S = 60
NONTRIVIAL_WAIT_TIME_THRESHOLD_S = 1e-3
class _LocalWrapper(object):
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
class RayTrialExecutor(TrialExecutor):
"""An implemention of TrialExecutor based on Ray."""
def __init__(self,
queue_trials=False,
reuse_actors=False,
ray_auto_init=False,
refresh_period=RESOURCE_REFRESH_PERIOD):
super(RayTrialExecutor, self).__init__(queue_trials)
self._running = {}
# Since trial resume after paused should not run
# trial.train.remote(), thus no more new remote object id generated.
# We use self._paused to store paused trials here.
self._paused = {}
self._reuse_actors = reuse_actors
self._cached_actor = None
self._avail_resources = Resources(cpu=0, gpu=0)
self._committed_resources = Resources(cpu=0, gpu=0)
self._resources_initialized = False
self._refresh_period = refresh_period
self._last_resource_refresh = float("-inf")
self._last_nontrivial_wait = time.time()
if not ray.is_initialized() and ray_auto_init:
logger.info("Initializing Ray automatically."
"For cluster usage or custom Ray initialization, "
"call `ray.init(...)` before `tune.run`.")
ray.init()
if ray.is_initialized():
self._update_avail_resources()
def _setup_runner(self, trial, reuse_allowed):
if (self._reuse_actors and reuse_allowed
and self._cached_actor is not None):
logger.debug("Reusing cached runner {} for {}".format(
self._cached_actor, trial.trial_id))
existing_runner = self._cached_actor
self._cached_actor = None
else:
if self._cached_actor:
logger.debug(
"Cannot reuse cached runner {} for new trial".format(
self._cached_actor))
self._cached_actor.stop.remote()
self._cached_actor.__ray_terminate__.remote()
self._cached_actor = None
existing_runner = None
cls = ray.remote(
num_cpus=trial.resources.cpu,
num_gpus=trial.resources.gpu,
resources=trial.resources.custom_resources)(
trial._get_trainable_cls())
trial.init_logger()
# We checkpoint metadata here to try mitigating logdir duplication
self.try_checkpoint_metadata(trial)
remote_logdir = trial.logdir
if existing_runner:
trial.runner = existing_runner
if not self.reset_trial(trial, trial.config, trial.experiment_tag):
raise AbortTrialExecution(
"Trial runner reuse requires reset_trial() to be "
"implemented and return True.")
return existing_runner
def logger_creator(config):
# Set the working dir in the remote process, for user file writes
if not os.path.exists(remote_logdir):
os.makedirs(remote_logdir)
if not ray.worker._mode() == ray.worker.LOCAL_MODE:
os.chdir(remote_logdir)
return NoopLogger(config, remote_logdir)
# Logging for trials is handled centrally by TrialRunner, so
# configure the remote runner to use a noop-logger.
return cls.remote(config=trial.config, logger_creator=logger_creator)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
assert trial.status == Trial.RUNNING, trial.status
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._running[remote] = trial
def _start_trial(self, trial, checkpoint=None):
"""Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoint fails.
"""
prior_status = trial.status
self.set_status(trial, Trial.RUNNING)
trial.runner = self._setup_runner(
trial,
reuse_allowed=checkpoint is not None
or trial._checkpoint.value is not None)
if not self.restore(trial, checkpoint):
if trial.status == Trial.ERROR:
raise RuntimeError(
"Restore from checkpoint failed for Trial {}.".format(
str(trial)))
previous_run = self._find_item(self._paused, trial)
if (prior_status == Trial.PAUSED and previous_run):
# If Trial was in flight when paused, self._paused stores result.
self._paused.pop(previous_run[0])
self._running[previous_run[0]] = trial
else:
self._train(trial)
def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
"""Stops this trial.
Stops this trial, releasing all allocated resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger.
"""
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.info(
"Destroying actor for trial {}. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None
def start_trial(self, trial, checkpoint=None):
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
"""
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return # don't retry fatal Tune errors
try:
# This forces the trial to not start from checkpoint.
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg)
# note that we don't return the resources, since they may
# have been lost
def _find_item(self, dictionary, item):
out = [rid for rid, t in dictionary.items() if t is item]
return out
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
"""Only returns resources if resources allocated."""
prior_status = trial.status
self._stop_trial(
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
if prior_status == Trial.RUNNING:
logger.debug("Returning resources for Trial %s.", str(trial))
self._return_resources(trial.resources)
out = self._find_item(self._running, trial)
for result_id in out:
self._running.pop(result_id)
def continue_training(self, trial):
"""Continues the training of this trial."""
self._train(trial)
def pause_trial(self, trial):
"""Pauses the trial.
If trial is in-flight, preserves return value in separate queue
before pausing, which is restored when Trial is resumed.
"""
trial_future = self._find_item(self._running, trial)
if trial_future:
self._paused[trial_future[0]] = trial
super(RayTrialExecutor, self).pause_trial(trial)
def reset_trial(self, trial, new_config, new_experiment_tag):
"""Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
"""
trial.experiment_tag = new_experiment_tag
trial.config = new_config
trainable = trial.runner
with warn_if_slow("reset_config"):
reset_val = ray.get(trainable.reset_config.remote(new_config))
return reset_val
def get_running_trials(self):
"""Returns the running trials."""
return list(self._running.values())
def get_next_available_trial(self):
shuffled_results = list(self._running.keys())
random.shuffle(shuffled_results)
# Note: We shuffle the results because `ray.wait` by default returns
# the first available result, and we want to guarantee that slower
# trials (i.e. trials that run remotely) also get fairly reported.
# See https://github.com/ray-project/ray/issues/4211 for details.
start = time.time()
[result_id], _ = ray.wait(shuffled_results)
wait_time = time.time() - start
if wait_time > NONTRIVIAL_WAIT_TIME_THRESHOLD_S:
self._last_nontrivial_wait = time.time()
if time.time() - self._last_nontrivial_wait > BOTTLENECK_WARN_PERIOD_S:
logger.warn(
"Over the last {} seconds, the Tune event loop has been "
"backlogged processing new results. Consider increasing your "
"period of result reporting to improve performance.".format(
BOTTLENECK_WARN_PERIOD_S))
self._last_nontrivial_wait = time.time()
return self._running[result_id]
def fetch_result(self, trial):
"""Fetches one result of the running trials.
Returns:
Result of the most recent trial training run."""
trial_future = self._find_item(self._running, trial)
if not trial_future:
raise ValueError("Trial was not running.")
self._running.pop(trial_future[0])
with warn_if_slow("fetch_result"):
result = ray.get(trial_future[0])
# For local mode
if isinstance(result, _LocalWrapper):
result = result.unwrap()
return result
def _commit_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) + resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu + resources.cpu_total(),
committed.gpu + resources.gpu_total(),
custom_resources=custom_resources)
def _return_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) - resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu - resources.cpu_total(),
committed.gpu - resources.gpu_total(),
custom_resources=custom_resources)
assert self._committed_resources.is_nonnegative(), (
"Resource invalid: {}".format(resources))
def _update_avail_resources(self, num_retries=5):
for i in range(num_retries):
try:
resources = ray.cluster_resources()
except Exception:
# TODO(rliaw): Remove this when local mode is fixed.
# https://github.com/ray-project/ray/issues/4147
logger.debug("Using resources for local machine.")
resources = ray.services.check_and_update_resources(
None, None, None)
if not resources:
logger.warning(
"Cluster resources not detected or are 0. Retrying...")
time.sleep(0.5)
if not resources:
# NOTE: This hides the possibility that Ray may be waiting for
# clients to connect.
resources.setdefault("CPU", 0)
resources.setdefault("GPU", 0)
logger.warning("Cluster resources cannot be detected or are 0. "
"You can resume this experiment by passing in "
"`resume=True` to `run`.")
resources = resources.copy()
num_cpus = resources.pop("CPU", 0)
num_gpus = resources.pop("GPU", 0)
custom_resources = resources
self._avail_resources = Resources(
int(num_cpus), int(num_gpus), custom_resources=custom_resources)
self._last_resource_refresh = time.time()
self._resources_initialized = True
def has_resources(self, resources):
"""Returns whether this runner has at least the specified resources.
This refreshes the Ray cluster resources if the time since last update
has exceeded self._refresh_period. This also assumes that the
cluster is not resizing very frequently.
"""
if time.time() - self._last_resource_refresh > self._refresh_period:
self._update_avail_resources()
currently_available = Resources.subtract(self._avail_resources,
self._committed_resources)
have_space = (
resources.cpu_total() <= currently_available.cpu
and resources.gpu_total() <= currently_available.gpu and all(
resources.get_res_total(res) <= currently_available.get(res)
for res in resources.custom_resources))
if have_space:
return True
can_overcommit = self._queue_trials
if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \
(resources.gpu_total() > 0 and currently_available.gpu <= 0) or \
any((resources.get_res_total(res_name) > 0
and currently_available.get(res_name) <= 0)
for res_name in resources.custom_resources):
can_overcommit = False # requested resource is already saturated
if can_overcommit:
logger.warning(
"Allowing trial to start even though the "
"cluster does not have enough free resources. Trial actors "
"may appear to hang until enough resources are added to the "
"cluster (e.g., via autoscaling). You can disable this "
"behavior by specifying `queue_trials=False` in "
"ray.tune.run().")
return True
return False
def debug_string(self):
"""Returns a human readable message for printing to the console."""
if self._resources_initialized:
status = "Resources requested: {}/{} CPUs, {}/{} GPUs".format(
self._committed_resources.cpu, self._avail_resources.cpu,
self._committed_resources.gpu, self._avail_resources.gpu)
customs = ", ".join([
"{}/{} {}".format(
self._committed_resources.get_res_total(name),
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources
])
if customs:
status += " ({})".format(customs)
return status
else:
return "Resources requested: ?"
def resource_string(self):
"""Returns a string describing the total resources available."""
if self._resources_initialized:
res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
self._avail_resources.gpu)
if self._avail_resources.custom_resources:
custom = ", ".join(
"{} {}".format(
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources)
res_str += " ({})".format(custom)
return res_str
else:
return "? CPUs, ? GPUs"
def on_step_begin(self):
"""Before step() called, update the available resources."""
self._update_avail_resources()
def save(self, trial, storage=Checkpoint.DISK):
"""Saves the trial's state to a checkpoint."""
trial._checkpoint.storage = storage
trial._checkpoint.last_result = trial.last_result
if storage == Checkpoint.MEMORY:
trial._checkpoint.value = trial.runner.save_to_object.remote()
else:
# Keeps only highest performing checkpoints if enabled
if trial.keep_checkpoints_num:
try:
last_attr_val = trial.last_result[
trial.checkpoint_score_attr]
if (trial.compare_checkpoints(last_attr_val)
and not math.isnan(last_attr_val)):
trial.best_checkpoint_attr_value = last_attr_val
self._checkpoint_and_erase(trial)
except KeyError:
logger.warning(
"Result dict has no key: {}. keep"
"_checkpoints_num flag will not work".format(
trial.checkpoint_score_attr))
else:
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(
trial.runner.save.remote())
return trial._checkpoint.value
@staticmethod
def _checkpoint_and_erase(trial):
"""Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
"""
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(trial.runner.save.remote())
if len(trial.history) >= trial.keep_checkpoints_num:
ray.get(trial.runner.delete_checkpoint.remote(trial.history[-1]))
trial.history.pop()
trial.history.insert(0, trial._checkpoint.value)
def _checkpoint_and_erase(self, subdir, trial):
"""Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
subdir string: either "" or "best"
trial : trial to save
"""
with warn_if_slow("save_to_disk"):
trial._checkpoint.value, folder_path = ray.get(trial.runner.save_checkpoint_relative.remote(subdir))
if trial.prefix[subdir]["limit"]:
if len(trial.prefix[subdir]["history"]) == trial.prefix[subdir]["limit"]:
ray.get(trial.runner.delete_checkpoint.remote(trial.prefix[subdir]["history"][-1]))
trial.prefix[subdir]["history"].pop()
trial.prefix[subdir]["history"].insert(0, folder_path)
def restore(self, trial, checkpoint=None):
"""Restores training state from a given model checkpoint.
This will also sync the trial results to a new location
if restoring on a different node.
"""
if checkpoint is None or checkpoint.value is None:
checkpoint = trial._checkpoint
if checkpoint is None or checkpoint.value is None:
return True
if trial.runner is None:
logger.error("Unable to restore - no runner.")
self.set_status(trial, Trial.ERROR)
return False
try:
value = checkpoint.value
if checkpoint.storage == Checkpoint.MEMORY:
assert type(value) != Checkpoint, type(value)
trial.runner.restore_from_object.remote(value)
else:
worker_ip = ray.get(trial.runner.current_ip.remote())
trial.sync_logger_to_new_location(worker_ip)
with warn_if_slow("restore_from_disk"):
ray.get(trial.runner.restore.remote(value))
trial.last_result = checkpoint.last_result
return True
except Exception:
logger.exception("Error restoring runner for Trial %s.", trial)
self.set_status(trial, Trial.ERROR)
return False
def export_trial_if_needed(self, trial):
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
return ray.get(
trial.runner.export_model.remote(trial.export_formats))
return {}
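# Illustrative sketch (an addition, not part of the original file): the executor
# is normally driven by Tune's TrialRunner, but its resource-reporting helpers
# can be inspected directly once Ray is initialized.
def _example_executor_inspection():
    ray.init(ignore_reinit_error=True)
    executor = RayTrialExecutor(queue_trials=False, refresh_period=0.5)
    print(executor.resource_string())   # e.g. "8 CPUs, 0 GPUs"
    print(executor.debug_string())      # committed vs. available resources
    return executor.get_running_trials()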
| 39.678632
| 112
| 0.59844
|
8678dd567db519d670dca84b6f8a196b5074d200
| 2,377
|
py
|
Python
|
scripts/NineAnimeDownloader.py
|
jtatia/Anime-Downloader
|
619386734fa1412cf6eaa42c36a88f7e336bb93d
|
[
"MIT"
] | 24
|
2017-05-25T19:20:46.000Z
|
2020-09-01T01:08:04.000Z
|
scripts/NineAnimeDownloader.py
|
jtatia/Anime-Downloader
|
619386734fa1412cf6eaa42c36a88f7e336bb93d
|
[
"MIT"
] | 3
|
2017-06-05T13:04:45.000Z
|
2018-11-05T14:41:15.000Z
|
scripts/NineAnimeDownloader.py
|
jtatia/Anime-Downloader
|
619386734fa1412cf6eaa42c36a88f7e336bb93d
|
[
"MIT"
] | 5
|
2017-10-23T21:01:57.000Z
|
2019-08-20T08:35:35.000Z
|
import os
import re
import sys
import time
import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
site = "https://9anime.to/"
# There is an option to provide either the anime name (option "1") or the URL of the anime's first-episode page (option "2").
anime_name = ""
anime_episode_page = ""
path = sys.argv[3]
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
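# Example invocations (an addition, illustrating the sys.argv usage above; the
# anime name and URL are placeholders):
#   python NineAnimeDownloader.py 1 "one punch man" /home/user/anime/
#   python NineAnimeDownloader.py 2 "https://9anime.to/watch/<anime>/<episode>" /home/user/anime/
# argv[1] selects the mode, argv[2] is the anime name or the episode-1 page URL,
# and argv[3] is the directory the episodes are saved into.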
browser = webdriver.Firefox()
browser.set_window_size(1024, 768)
# Now search the anime if anime name is provided
if "1" == sys.argv[1]:
try:
browser.get(site)
time.sleep(5)
anime_name = sys.argv[2]
#browser.save_screenshot("mainpage.png")
search = browser.find_element_by_css_selector(".inner > input:nth-child(2)")
search.click()
search.send_keys(anime_name + Keys.ENTER)
time.sleep(5)
#browser.save_screenshot('searchresult.png')
anime_episode_page = browser.find_element_by_css_selector("div.col-lg-4:nth-child(1) > div:nth-child(1) > a:nth-child(1)")
anime_episode_page.click()
except:
print("Error")
sys.exit(1)
else:
try:
anime_episode_page = sys.argv[2]
browser.get(anime_episode_page)
time.sleep(10)
browser.execute_script("window.stop();")
except:
print(anime_episode_page)
print("Error")
sys.exit(2)
# Now we have reached the page of episode 1 (or episode n if method 2 was used). Moving forward...
download_link=""
link=""
time.sleep(5)
count=1
while True:
print("Downloading Episode "+str(count)+".mp4")
try:
time.sleep(3)
download_link = browser.find_element_by_css_selector("a.item")
link = download_link.get_attribute('href')
downloaded = requests.get(link)
except:
print("Error")
break
d = downloaded.headers["content-disposition"]
fname = re.findall("filename=(.+)", d)[0]
f = open(path + fname[1:len(fname)-1], "wb")
for con in downloaded.iter_content(100000):
f.write(con)
print("Downloaded " + fname)
f.close()
count+=1
# Go to the next page; if an error occurs, we have reached the last page, so break
try:
next = browser.find_element_by_css_selector("div.item.mbtn.next.disabled")
#print("Unable to click")
break
except:
next = browser.find_element_by_css_selector("div.item.mbtn.next")
next.click()
time.sleep(3)
#print("clicked")
browser.quit()
#print("All possible episodes have been looped through")
| 23.77
| 124
| 0.717291
|
44a67328046fd7280dd63d7732ea6f91a6e284e3
| 1,786
|
py
|
Python
|
src/projects/ex49/ex49/parser.py
|
SpyrosDellas/learn-python-the-hard-way
|
bdbed7658f2650b12f7a1388adf27ad436353f61
|
[
"MIT"
] | null | null | null |
src/projects/ex49/ex49/parser.py
|
SpyrosDellas/learn-python-the-hard-way
|
bdbed7658f2650b12f7a1388adf27ad436353f61
|
[
"MIT"
] | null | null | null |
src/projects/ex49/ex49/parser.py
|
SpyrosDellas/learn-python-the-hard-way
|
bdbed7658f2650b12f7a1388adf27ad436353f61
|
[
"MIT"
] | null | null | null |
class ParserError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, object):
# remember we take ('noun', 'princess') tuples and convert them
self.subject = subject[1]
self.verb = verb[1]
self.object = object[1]
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next = peek(word_list)
if next == 'noun':
return match(word_list, 'noun')
if next == 'direction':
return match(word_list, 'direction')
else:
raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list, subj):
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
def parse_sentence(word_list):
skip(word_list, 'stop')
start = peek(word_list)
if start == 'noun':
subj = match(word_list, 'noun')
return parse_subject(word_list, subj)
elif start == 'verb':
# assume the subject is the player then
return parse_subject(word_list, ('noun', 'player'))
else:
raise ParserError("Must start with subject, object, or verb not: %s" % start)
| 22.325
| 85
| 0.614222
|
c0ac5751ce285c5e4199f9c1a2a54af442dd63b3
| 8,246
|
py
|
Python
|
lldb/examples/python/lldb_module_utils.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/examples/python/lldb_module_utils.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/examples/python/lldb_module_utils.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import lldb
import optparse
import shlex
import string
import sys
class DumpLineTables:
command_name = "dump-line-tables"
short_description = "Dumps line tables for all compile units in the specified modules."
description = 'Dumps all line tables from all compile units for any modules specified as arguments. Specifying the --verbose flag will output address ranges for each line entry.'
usage = "usage: %prog [options] MODULE1 [MODULE2 ...]"
def create_options(self):
self.parser = optparse.OptionParser(
description=self.description,
prog=self.command_name,
usage=self.usage)
self.parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='Display verbose output.',
default=False)
def get_short_help(self):
        return self.short_description
def get_long_help(self):
return self.help_string
def __init__(self, debugger, unused):
self.create_options()
self.help_string = self.parser.format_help()
def __call__(self, debugger, command, exe_ctx, result):
# Use the Shell Lexer to properly parse up command options just like a
# shell would
command_args = shlex.split(command)
try:
(options, args) = self.parser.parse_args(command_args)
except:
# if you don't handle exceptions, passing an incorrect argument to the OptionParser will cause LLDB to exit
# (courtesy of OptParse dealing with argument errors by throwing SystemExit)
result.SetError("option parsing failed")
return
# Always get program state from the SBExecutionContext passed in as exe_ctx
target = exe_ctx.GetTarget()
if not target.IsValid():
result.SetError("invalid target")
return
for module_path in args:
module = target.module[module_path]
if not module:
result.SetError('no module found that matches "%s".' % (module_path))
return
num_cus = module.GetNumCompileUnits()
print('Module: "%s"' % (module.file.fullpath), end=' ', file=result)
if num_cus == 0:
print('no debug info.', file=result)
continue
print('has %u compile units:' % (num_cus), file=result)
for cu_idx in range(num_cus):
cu = module.GetCompileUnitAtIndex(cu_idx)
print(' Compile Unit: %s' % (cu.file.fullpath), file=result)
for line_idx in range(cu.GetNumLineEntries()):
line_entry = cu.GetLineEntryAtIndex(line_idx)
start_file_addr = line_entry.addr.file_addr
end_file_addr = line_entry.end_addr.file_addr
# If the two addresses are equal, this line table entry
# is a termination entry
if options.verbose:
if start_file_addr != end_file_addr:
result.PutCString(
' [%#x - %#x): %s' %
(start_file_addr, end_file_addr, line_entry))
else:
if start_file_addr == end_file_addr:
result.PutCString(' %#x: END' %
(start_file_addr))
else:
result.PutCString(
' %#x: %s' %
(start_file_addr, line_entry))
if start_file_addr == end_file_addr:
result.PutCString("\n")
class DumpFiles:
command_name = "dump-files"
short_description = "Dumps full paths to compile unit files and optionally all line table files."
usage = "usage: %prog [options] MODULE1 [MODULE2 ...]"
description = '''This class adds a dump-files command to the LLDB interpreter.
This command will dump all compile unit file paths found for each source file
for the binaries specified as arguments in the current target. Specify the
--support-files or -s option to see all file paths that a compile unit uses in
its lines tables. This is handy for troubleshooting why breakpoints aren't
working in IDEs that specify full paths to source files when setting file and
line breakpoints. Sometimes symlinks cause the debug info to contain the symlink
path and an IDE will resolve the path to the actual file and use the resolved
path when setting breakpoints.
'''
def create_options(self):
# Pass add_help_option = False, since this keeps the command in line with lldb commands,
# and we wire up "help command" to work by providing the long & short help methods below.
self.parser = optparse.OptionParser(
description = self.description,
prog = self.command_name,
usage = self.usage,
add_help_option = False)
self.parser.add_option(
'-s',
'--support-files',
action = 'store_true',
dest = 'support_files',
help = 'Dumps full paths to all files used in a compile unit.',
default = False)
def get_short_help(self):
return self.short_description
def get_long_help(self):
return self.help_string
def __init__(self, debugger, unused):
self.create_options()
self.help_string = self.parser.format_help()
def __call__(self, debugger, command, exe_ctx, result):
# Use the Shell Lexer to properly parse up command options just like a
# shell would
command_args = shlex.split(command)
try:
(options, args) = self.parser.parse_args(command_args)
except:
# if you don't handle exceptions, passing an incorrect argument to the OptionParser will cause LLDB to exit
# (courtesy of OptParse dealing with argument errors by throwing SystemExit)
result.SetError("option parsing failed")
return
# Always get program state from the SBExecutionContext passed in as exe_ctx
target = exe_ctx.GetTarget()
if not target.IsValid():
result.SetError("invalid target")
return
if len(args) == 0:
result.SetError("one or more executable paths must be specified")
return
for module_path in args:
module = target.module[module_path]
if not module:
result.SetError('no module found that matches "%s".' % (module_path))
return
num_cus = module.GetNumCompileUnits()
print('Module: "%s"' % (module.file.fullpath), end=' ', file=result)
if num_cus == 0:
print('no debug info.', file=result)
continue
print('has %u compile units:' % (num_cus), file=result)
for i in range(num_cus):
cu = module.GetCompileUnitAtIndex(i)
print(' Compile Unit: %s' % (cu.file.fullpath), file=result)
if options.support_files:
num_support_files = cu.GetNumSupportFiles()
for j in range(num_support_files):
path = cu.GetSupportFileAtIndex(j).fullpath
print(' file[%u]: %s' % (j, path), file=result)
def __lldb_init_module(debugger, dict):
# This initializer is being run from LLDB in the embedded command interpreter
# Add any commands contained in this module to LLDB
debugger.HandleCommand(
'command script add -c %s.DumpLineTables %s' % (__name__,
DumpLineTables.command_name))
debugger.HandleCommand(
'command script add -c %s.DumpFiles %s' % (__name__, DumpFiles.command_name))
print('The "%s" and "%s" commands have been installed.' % (DumpLineTables.command_name,
DumpFiles.command_name))
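# Usage sketch (illustrative, not part of the original file): both commands are
# registered by __lldb_init_module when the script is imported into a session.
# The binary name "a.out" below is just a placeholder.
#
#   (lldb) command script import lldb_module_utils.py
#   (lldb) dump-line-tables --verbose a.out
#   (lldb) dump-files --support-files a.out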
| 42.725389
| 182
| 0.590953
|
7461a5bd23d9ffc9b87c77c38227dbf8363abf4b
| 244
|
py
|
Python
|
venv/lib/python3.6/site-packages/pushshift_py/__init__.py
|
gonzaloetjo/Crypto_Trading_Bot
|
e67493a611ddded07455be35d46dd3cd4e8003d4
|
[
"Unlicense"
] | 13
|
2019-07-13T08:20:39.000Z
|
2021-05-04T02:18:58.000Z
|
venv/lib/python3.6/site-packages/pushshift_py/__init__.py
|
gonzaloetjo/Crypto_Trading_Bot
|
e67493a611ddded07455be35d46dd3cd4e8003d4
|
[
"Unlicense"
] | 5
|
2019-06-07T23:57:59.000Z
|
2021-06-25T15:20:08.000Z
|
venv/lib/python3.6/site-packages/pushshift_py/__init__.py
|
gonzaloetjo/Crypto_Trading_Bot
|
e67493a611ddded07455be35d46dd3cd4e8003d4
|
[
"Unlicense"
] | 1
|
2021-07-07T16:07:13.000Z
|
2021-07-07T16:07:13.000Z
|
"""
Pushshift.io API Wrapper (for reddit.com public comment/submission search)
https://github.com/typenil/pushshift.py
"""
from pushshift_py.pushshift_api import PushshiftAPI
from pushshift_py.pushshift_api_minimal import PushshiftAPIMinimal
| 27.111111
| 74
| 0.831967
|
b900a9d7b29ad7b68f9029c842e0fdcacef8eed3
| 10,988
|
py
|
Python
|
src/argteller/widgets/widgets.py
|
mozjay0619/argteller-viz
|
963c6d43019efb2b0e9bcdb4b3053b57cd4ff373
|
[
"BSD-3-Clause"
] | null | null | null |
src/argteller/widgets/widgets.py
|
mozjay0619/argteller-viz
|
963c6d43019efb2b0e9bcdb4b3053b57cd4ff373
|
[
"BSD-3-Clause"
] | 7
|
2021-08-04T15:54:07.000Z
|
2021-09-17T17:40:38.000Z
|
src/argteller/widgets/widgets.py
|
mozjay0619/argteller-viz
|
963c6d43019efb2b0e9bcdb4b3053b57cd4ff373
|
[
"BSD-3-Clause"
] | null | null | null |
try:
from IPython.display import display
import ipywidgets as widgets
from ipywidgets import HBox, Label, VBox
from ipywidgets import Button, Layout, HTML
from traitlets import MetaHasTraits
except ModuleNotFoundError:
class VBox():
pass
class MetaHasTraits():
pass
class ParamTextWidget(VBox):
def __init__(self, name, example=None, default_value=None, preset_value=None, optional=False, widget=None,
widget_initialized=None, param_setter_event=None, set_from=None):
if not isinstance(VBox, MetaHasTraits):
return
self.name = name
self.type = 'text'
self.initial = not widget_initialized
self.param_setter_event = param_setter_event
        style = {'description_width': 'initial'}
layout = Layout(display='flex',
flex_flow='row',
flex_basis='auto',
align_content='stretch',
justify_content='center',
# align_items='baseline stretch',
width='70%',
flex_wrap='wrap',
border=None,
# flex_basis='200%'
)
if set_from:
label = widgets.HTML(f"<b><font size=2 color='grey'>{self.name} (set via {set_from})</b>")
elif preset_value:
label = widgets.HTML(f"<b><font size=2 color='blue'>{self.name}</b>")
elif optional:
label = widgets.HTML(f"<b><font size=2 color='grey'>{self.name} (optional)</b>")
else:
label = widgets.HTML(f"<b><font size=2 color='black'>{self.name}</b>")
if widget:
self.widget = widget
else:
if example is None:
self.widget = VBox([widgets.Text(style=style, layout=layout)])
else:
self.widget = VBox([
widgets.Label(value=example),
widgets.Text(style=style, layout=layout)])
if self.initial or self.param_setter_event.isSet() : # So that user input is not overwritten every time.
if preset_value is not None: # So that preset values take precedence over default values.
self.widget.children[-1].value = str(preset_value)
elif default_value is not None:
self.widget.children[-1].value = str(default_value)
self.initial = False
children = [label, self.widget]
super().__init__(children=children)
def get_value(self):
return self.widget.children[-1].value
class ParamBooleanWidget(VBox):
def __init__(self, name, example=None,
default_value=None, preset_value=None, optional=False, widget=None, widget_initialized=None, param_setter_event=None):
if not isinstance(VBox, MetaHasTraits):
return
self.name = name
self.type = 'boolean'
self.initial = not widget_initialized
self.param_setter_event = param_setter_event
if preset_value:
label = widgets.HTML(f"<b><font size=2 color='blue'>{self.name}</b>")
elif optional:
label = widgets.HTML(f"<b><font size=2 color='grey'>{self.name}</b>")
else:
label = widgets.HTML(f"<b><font size=2 color='black'>{self.name}</b>")
if widget:
self.widget = widget
else:
# if example is None:
# self.widget = VBox([widgets.Text(style=style, layout=layout)])
# else:
# self.widget = VBox([
# widgets.Label(value=example),
# widgets.Text(style=style, layout=layout)])
if example is None:
self.widget = VBox([
widgets.Checkbox(
value=False,
description='True',
disabled=False,
indent=False
)])
else:
self.widget = VBox([
widgets.Label(value=example),
widgets.Checkbox(
value=False,
description='True',
disabled=False,
indent=False)
])
# self.widget = VBox([
# widgets.Label(value=example),
# widgets.Text(style=style, layout=layout)])
if self.initial or self.param_setter_event.isSet() : # So that user input is not overwritten every time.
if preset_value is not None: # So that preset values take precedence over default values.
self.widget.children[-1].value = bool(preset_value)
elif default_value is not None:
self.widget.children[-1].value = bool(default_value)
else:
self.widget.children[-1].value = False
self.initial = False
children = [label, self.widget]
super().__init__(children=children)
def get_value(self):
return self.widget.children[-1].value
class ParamChoiceWidget(VBox):
def __init__(self, name, example=None,
options=None, default_value=None, preset_value=None, optional=False, widget=None, widget_initialized=None, param_setter_event=None):
if not isinstance(VBox, MetaHasTraits):
return
self.name = name
self.type = 'choice'
self.initial = not widget_initialized
self.param_setter_event = param_setter_event
layout = Layout(width='auto')
if preset_value:
label = widgets.HTML(f"<b><font size=2 color='blue'>{self.name}</b>")
elif optional:
label = widgets.HTML(f"<b><font size=2 color='grey'>{self.name}</b>")
else:
label = widgets.HTML(f"<b><font size=2 color='black'>{self.name}</b>")
if widget:
self.widget = widget
else:
self.widget = VBox([widgets.RadioButtons(options=options, disabled=False, layout=layout)])
if self.initial or self.param_setter_event.isSet() : # So that user input is not overwritten every time.
if preset_value is not None: # So that preset values take precedence over default values.
self.widget.children[0].value = str(preset_value)
elif default_value is not None:
self.widget.children[0].value = str(default_value)
else:
self.widget.children[0].value = None
self.initial = False
children = [label, self.widget]
super().__init__(children=children)
def get_value(self):
return self.widget.children[-1].value
class ParamSetterWidget(VBox):
def __init__(self, name, widget, default_value=None, preset_value=None, widget_initialized=None, param_setter_event=None):
# this widget already exists
if not isinstance(VBox, MetaHasTraits):
return
self.name = name
self.type = 'param_setter'
self.initial = not widget_initialized
self.param_setter_event = param_setter_event
# This is just label for this widget. The values are actually being
# set on different widgets.
if default_value:
label = widgets.HTML(f"<b><font size=2 color='blue'>{self.name}</b>")
else:
label = widgets.HTML(f"<b><font size=2 color='black'>{self.name}</b>")
self.widget = widget
if self.initial or self.param_setter_event.isSet() : # So that user input is not overwritten every time.
if preset_value is not None: # So that preset values take precedence over default values.
# the widget is now VBox
self.widget.children[-1].value = str(preset_value)
elif default_value is not None:
self.widget.children[-1].value = str(default_value)
self.initial = False
children = [label, self.widget]
super().__init__(children=children)
class Custom1(VBox):
def __init__(self):
if not isinstance(VBox, MetaHasTraits):
return
self.name = 'custom1'
self.type = 'custom'
layout = {'width': '600px'}
style = {'description_width': 'initial'}
w1=widgets.IntRangeSlider(
value=[10, 150],
min=0,
max=300,
step=1,
description='Regs search range:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
layout=layout,
style=style
)
layout = {'width': '150px'}
style = {'description_width': 'initial'}
w2=widgets.Dropdown(
options=[str(elem) for elem in list(range(1, 10))],
value='1',
description='Search gaps:',
disabled=False,
layout=layout
)
layout = {'width': '600px'}
style = {'description_width': 'initial'}
w3=widgets.IntRangeSlider(
value=[5, 60],
min=0,
max=120,
step=1,
description='Days search range:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
layout=layout,
style=style
)
layout = {'width': '150px'}
style = {'description_width': 'initial'}
w4=widgets.Dropdown(
options=[str(elem) for elem in list(range(1, 10))],
value='1',
description='Search gaps:',
disabled=False,
layout=layout
)
h1 = HBox([w1, w2])
h2 = HBox([w3, w4])
label = widgets.HTML(f"<b><font size=2 color='black'>{'search_space'}</b>")
children = [label, h1, h2]
super().__init__(children=children)
def get_value(self):
region_range = self.children[0].children[0].value
region_jump = self.children[0].children[1].value
day_range = self.children[1].children[0].value
day_jump = self.children[1].children[1].value
d = {'num_days': list(range(*day_range, int(day_jump))),
'num_regions': list(range(*region_range, int(region_jump)))}
return d
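# Usage sketch (illustrative, not part of the original module): inside a Jupyter
# notebook with ipywidgets available, a widget can be built and displayed like
# this; the parameter name 'color' and its options are made-up values.
#
# import threading
# w = ParamChoiceWidget('color', options=['red', 'blue'], default_value='red',
#                       widget_initialized=False,
#                       param_setter_event=threading.Event())
# display(w)      # render the labelled radio buttons
# w.get_value()   # -> 'red' until the user picks another option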
| 30.865169
| 140
| 0.531762
|
fe5121a4666184393de6f2a4fca70c1cf2006da9
| 3,620
|
py
|
Python
|
build/lib.linux-x86_64-2.7/birdspotter/user_influence.py
|
rohitram96/BirdSpotter
|
f1697280ed9e4d4025529b535b8616de28b8efe7
|
[
"MIT"
] | 17
|
2020-07-15T00:56:51.000Z
|
2022-03-31T11:34:03.000Z
|
build/lib.linux-x86_64-2.7/birdspotter/user_influence.py
|
rohitram96/BirdSpotter
|
f1697280ed9e4d4025529b535b8616de28b8efe7
|
[
"MIT"
] | 6
|
2020-01-19T07:31:13.000Z
|
2021-01-26T22:47:08.000Z
|
build/lib.linux-x86_64-2.7/birdspotter/user_influence.py
|
rohitram96/BirdSpotter
|
f1697280ed9e4d4025529b535b8616de28b8efe7
|
[
"MIT"
] | null | null | null |
from functools import reduce
import pandas as pd
import numpy as np
def casIn(cascade, time_decay = -0.000068, alpha = None, beta = 1.0):
"""Computes influence in one cascade
Parameters
----------
    cascade : str or DataFrame
        Path to a cascade file, or a DataFrame holding a single cascade
    time_decay : float
        The time-decay r parameter described in the paper, by default -0.000068
    alpha : float, optional
        A float between 0 and 1, as described in the paper. If None the DebateNight method is used, else the spatial-decay method, by default None
    beta : float, optional
        A social strength hyper-parameter, by default 1.0
Returns
-------
DataFrame
A dataframe describing the influence of each user in a single cascade.
"""
if isinstance(cascade, str):
        cascade = pd.read_csv(cascade) # Read one cascade from a local file
p_ij = P(cascade, alpha = alpha, r=time_decay, beta = beta) # compute p_ij in given cascade
inf, m_ij = influence(p_ij, alpha) # compute user influence
cascade["influence"] = pd.Series(inf)
return cascade
def P(cascade, alpha = None, r = -0.000068, beta = 1.0):
"""Computes the P matrix of a cascade
The P matrix describes the stochastic retweet graph.
Parameters
----------
cascade : DataFrame
A dataframe describing a single cascade, with a time column ascending from 0, a magnitude column and index of user ids
alpha : float, optional
        A float between 0 and 1, as described in the paper. If None the DebateNight method is used, else the spatial-decay method, by default None
r : float, optional
The time-decay r parameter described in the paper, by default -0.000068
beta : float, optional
A social strength hyper-parameter, by default 1.0
Returns
-------
array-like
A matrix of size (n,n), where n is the number of tweets in the cascade, where P[i][j] is the probability that j is a retweet of tweet i.
"""
n = len(cascade)
t = np.zeros(n,dtype = np.float64)
f = np.zeros(n,dtype = np.float64)
p = np.zeros((n,n),dtype = np.float64)
norm = np.zeros(n,dtype = np.float64)
for k, row in cascade.iterrows():
if k == 0:
p[0][0] = 1
t[0] = row['time']
if np.isnan(row['magnitude']):
print(row)
f[0] = 1 if row['magnitude'] == 0 else row['magnitude']
continue
t[k] = row['time']
f[k] = (1 if row['magnitude'] == 0 else row['magnitude'])**beta
p[:k, k] = ((r * (t[k] - t[0:k])) + np.log(f[0:k])) # store the P_ji in log space
norm[k] = reduce(np.logaddexp, p[:k, k])
p[:k, k] = np.exp(p[:k, k] - norm[k])# recover the P_ji from log space
p *= (alpha if alpha else 1)
return p
def influence(p, alpha = None):
"""Estimates user influence
This function compute the user influence and store it in matirx m_ij
Parameters
----------
p : array-like
The P matrix describing the stochastic retweet graph
alpha : float, optional
        A float between 0 and 1, as described in the paper. If None the DebateNight method is used, else the spatial-decay method, by default None
Returns
-------
array-like, array-like
A n-array describing the influence of n users/tweets and the (n,n)-array describing the intermediary contribution of influence between tweets
"""
n = len(p)
m = np.zeros((n, n))
m[0, 0] = 1
for i in range(0, n-1):
vec = p[:i+1, i+1]
m[:i+1, i+1] = m[:i+1, :i+1]@vec
m[i+1, i+1] = (1-alpha if alpha else 1)
influence = np.sum(m, axis = 1)
return influence, m
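# Usage sketch (illustrative, not part of the original module): casIn expects a
# cascade whose 'time' column ascends from 0 and whose 'magnitude' column holds
# e.g. follower counts; the three rows below are made-up data.
#
# import pandas as pd
# cascade = pd.DataFrame({'time': [0, 30, 120], 'magnitude': [250, 10, 40]})
# result = casIn(cascade, time_decay=-0.000068)
# print(result['influence'])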
| 35.490196
| 149
| 0.60442
|
6f463d4d5f9fc781ef84969e16cf6ea3544b6f83
| 2,037
|
py
|
Python
|
src/rubrix/sdk/models/token_classification_aggregations_mentions_additional_property.py
|
sakares/rubrix
|
791ffb29815b5d24f2bbbb0fa422f85f8b30098f
|
[
"Apache-2.0"
] | null | null | null |
src/rubrix/sdk/models/token_classification_aggregations_mentions_additional_property.py
|
sakares/rubrix
|
791ffb29815b5d24f2bbbb0fa422f85f8b30098f
|
[
"Apache-2.0"
] | null | null | null |
src/rubrix/sdk/models/token_classification_aggregations_mentions_additional_property.py
|
sakares/rubrix
|
791ffb29815b5d24f2bbbb0fa422f85f8b30098f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Type, TypeVar
import attr
T = TypeVar("T", bound="TokenClassificationAggregationsMentionsAdditionalProperty")
@attr.s(auto_attribs=True)
class TokenClassificationAggregationsMentionsAdditionalProperty:
""" """
additional_properties: Dict[str, int] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
token_classification_aggregations_mentions_additional_property = cls()
token_classification_aggregations_mentions_additional_property.additional_properties = (
d
)
return token_classification_aggregations_mentions_additional_property
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> int:
return self.additional_properties[key]
def __setitem__(self, key: str, value: int) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
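# Usage sketch (illustrative, not part of the generated file): the model behaves
# like a thin dict of mention -> count; the mention string below is made up.
#
# prop = TokenClassificationAggregationsMentionsAdditionalProperty.from_dict({"Barack Obama": 5})
# prop["Barack Obama"]   # -> 5
# prop.to_dict()         # -> {"Barack Obama": 5}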
| 32.854839
| 96
| 0.71134
|
8dbc9ce5e77aa0255444dc1e90397c8070647101
| 10,724
|
py
|
Python
|
sdks/python/apache_beam/transforms/userstate_test.py
|
lgajowy/beam
|
2834b2ddc5a7cdc553adf0cafb1e7850181e0e68
|
[
"Apache-2.0"
] | 1
|
2018-07-13T02:57:48.000Z
|
2018-07-13T02:57:48.000Z
|
sdks/python/apache_beam/transforms/userstate_test.py
|
lgajowy/beam
|
2834b2ddc5a7cdc553adf0cafb1e7850181e0e68
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/transforms/userstate_test.py
|
lgajowy/beam
|
2834b2ddc5a7cdc553adf0cafb1e7850181e0e68
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the Beam State and Timer API interfaces."""
import unittest
import mock
from apache_beam.coders import BytesCoder
from apache_beam.coders import VarIntCoder
from apache_beam.runners.common import DoFnSignature
from apache_beam.transforms.combiners import TopCombineFn
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.userstate import CombiningValueStateSpec
from apache_beam.transforms.userstate import TimerSpec
from apache_beam.transforms.userstate import UserStateUtils
from apache_beam.transforms.userstate import on_timer
class TestStatefulDoFn(DoFn):
"""An example stateful DoFn with state and timers."""
BUFFER_STATE_1 = BagStateSpec('buffer', BytesCoder())
BUFFER_STATE_2 = BagStateSpec('buffer2', VarIntCoder())
EXPIRY_TIMER_1 = TimerSpec('expiry1', TimeDomain.WATERMARK)
EXPIRY_TIMER_2 = TimerSpec('expiry2', TimeDomain.WATERMARK)
EXPIRY_TIMER_3 = TimerSpec('expiry3', TimeDomain.WATERMARK)
def process(self, element, t=DoFn.TimestampParam,
buffer_1=DoFn.StateParam(BUFFER_STATE_1),
buffer_2=DoFn.StateParam(BUFFER_STATE_2),
timer_1=DoFn.TimerParam(EXPIRY_TIMER_1),
timer_2=DoFn.TimerParam(EXPIRY_TIMER_2)):
yield element
@on_timer(EXPIRY_TIMER_1)
def on_expiry_1(self,
buffer=DoFn.StateParam(BUFFER_STATE_1),
timer_1=DoFn.TimerParam(EXPIRY_TIMER_1),
timer_2=DoFn.TimerParam(EXPIRY_TIMER_2),
timer_3=DoFn.TimerParam(EXPIRY_TIMER_3)):
yield 'expired1'
@on_timer(EXPIRY_TIMER_2)
def on_expiry_2(self,
buffer=DoFn.StateParam(BUFFER_STATE_2),
timer_2=DoFn.TimerParam(EXPIRY_TIMER_2),
timer_3=DoFn.TimerParam(EXPIRY_TIMER_3)):
yield 'expired2'
@on_timer(EXPIRY_TIMER_3)
def on_expiry_3(self,
buffer_1=DoFn.StateParam(BUFFER_STATE_1),
buffer_2=DoFn.StateParam(BUFFER_STATE_2),
timer_3=DoFn.TimerParam(EXPIRY_TIMER_3)):
yield 'expired3'
class InterfaceTest(unittest.TestCase):
def _validate_dofn(self, dofn):
# Construction of DoFnSignature performs validation of the given DoFn.
# In particular, it ends up calling userstate._validate_stateful_dofn.
# That behavior is explicitly tested below in test_validate_dofn()
DoFnSignature(dofn)
@mock.patch(
'apache_beam.transforms.userstate.UserStateUtils.validate_stateful_dofn')
def test_validate_dofn(self, unused_mock):
dofn = TestStatefulDoFn()
self._validate_dofn(dofn)
UserStateUtils.validate_stateful_dofn.assert_called_with(dofn)
def test_spec_construction(self):
BagStateSpec('statename', VarIntCoder())
with self.assertRaises(AssertionError):
BagStateSpec(123, VarIntCoder())
CombiningValueStateSpec('statename', VarIntCoder(), TopCombineFn(10))
with self.assertRaises(AssertionError):
CombiningValueStateSpec(123, VarIntCoder(), TopCombineFn(10))
with self.assertRaises(AssertionError):
CombiningValueStateSpec('statename', VarIntCoder(), object())
# BagStateSpec('bag', )
# TODO: add more spec tests
with self.assertRaises(ValueError):
DoFn.TimerParam(BagStateSpec('elements', BytesCoder()))
TimerSpec('timer', TimeDomain.WATERMARK)
TimerSpec('timer', TimeDomain.REAL_TIME)
with self.assertRaises(ValueError):
TimerSpec('timer', 'bogus_time_domain')
with self.assertRaises(ValueError):
DoFn.StateParam(TimerSpec('timer', TimeDomain.WATERMARK))
def test_param_construction(self):
with self.assertRaises(ValueError):
DoFn.StateParam(TimerSpec('timer', TimeDomain.WATERMARK))
with self.assertRaises(ValueError):
DoFn.TimerParam(BagStateSpec('elements', BytesCoder()))
def test_good_signatures(self):
class BasicStatefulDoFn(DoFn):
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
EXPIRY_TIMER = TimerSpec('expiry1', TimeDomain.WATERMARK)
def process(self, element, buffer=DoFn.StateParam(BUFFER_STATE),
timer1=DoFn.TimerParam(EXPIRY_TIMER)):
yield element
@on_timer(EXPIRY_TIMER)
def expiry_callback(self, element, timer=DoFn.TimerParam(EXPIRY_TIMER)):
yield element
self._validate_dofn(BasicStatefulDoFn())
self._validate_dofn(TestStatefulDoFn())
def test_bad_signatures(self):
# (1) The same state parameter is duplicated on the process method.
class BadStatefulDoFn1(DoFn):
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
def process(self, element, b1=DoFn.StateParam(BUFFER_STATE),
b2=DoFn.StateParam(BUFFER_STATE)):
yield element
with self.assertRaises(ValueError):
self._validate_dofn(BadStatefulDoFn1())
# (2) The same timer parameter is duplicated on the process method.
class BadStatefulDoFn2(DoFn):
TIMER = TimerSpec('timer', TimeDomain.WATERMARK)
def process(self, element, t1=DoFn.TimerParam(TIMER),
t2=DoFn.TimerParam(TIMER)):
yield element
with self.assertRaises(ValueError):
self._validate_dofn(BadStatefulDoFn2())
# (3) The same state parameter is duplicated on the on_timer method.
class BadStatefulDoFn3(DoFn):
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
EXPIRY_TIMER_1 = TimerSpec('expiry1', TimeDomain.WATERMARK)
EXPIRY_TIMER_2 = TimerSpec('expiry2', TimeDomain.WATERMARK)
@on_timer(EXPIRY_TIMER_1)
def expiry_callback(self, element, b1=DoFn.StateParam(BUFFER_STATE),
b2=DoFn.StateParam(BUFFER_STATE)):
yield element
with self.assertRaises(ValueError):
self._validate_dofn(BadStatefulDoFn3())
# (4) The same timer parameter is duplicated on the on_timer method.
class BadStatefulDoFn4(DoFn):
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
EXPIRY_TIMER_1 = TimerSpec('expiry1', TimeDomain.WATERMARK)
EXPIRY_TIMER_2 = TimerSpec('expiry2', TimeDomain.WATERMARK)
@on_timer(EXPIRY_TIMER_1)
def expiry_callback(self, element, t1=DoFn.TimerParam(EXPIRY_TIMER_2),
t2=DoFn.TimerParam(EXPIRY_TIMER_2)):
yield element
with self.assertRaises(ValueError):
self._validate_dofn(BadStatefulDoFn4())
def test_validation_typos(self):
# (1) Here, the user mistakenly used the same timer spec twice for two
# different timer callbacks.
with self.assertRaisesRegexp(
ValueError,
r'Multiple on_timer callbacks registered for TimerSpec\(expiry1\).'):
class StatefulDoFnWithTimerWithTypo1(DoFn): # pylint: disable=unused-variable
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
EXPIRY_TIMER_1 = TimerSpec('expiry1', TimeDomain.WATERMARK)
EXPIRY_TIMER_2 = TimerSpec('expiry2', TimeDomain.WATERMARK)
def process(self, element):
pass
@on_timer(EXPIRY_TIMER_1)
def on_expiry_1(self, buffer_state=DoFn.StateParam(BUFFER_STATE)):
yield 'expired1'
# Note that we mistakenly associate this with the first timer.
@on_timer(EXPIRY_TIMER_1)
def on_expiry_2(self, buffer_state=DoFn.StateParam(BUFFER_STATE)):
yield 'expired2'
# (2) Here, the user mistakenly used the same callback name and overwrote
# the first on_expiry_1 callback.
class StatefulDoFnWithTimerWithTypo2(DoFn):
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
EXPIRY_TIMER_1 = TimerSpec('expiry1', TimeDomain.WATERMARK)
EXPIRY_TIMER_2 = TimerSpec('expiry2', TimeDomain.WATERMARK)
def process(self, element,
timer1=DoFn.TimerParam(EXPIRY_TIMER_1),
timer2=DoFn.TimerParam(EXPIRY_TIMER_2)):
pass
@on_timer(EXPIRY_TIMER_1)
def on_expiry_1(self, buffer_state=DoFn.StateParam(BUFFER_STATE)):
yield 'expired1'
# Note that we mistakenly reuse the "on_expiry_2" name; this is valid
# syntactically in Python.
@on_timer(EXPIRY_TIMER_2)
def on_expiry_1(self, buffer_state=DoFn.StateParam(BUFFER_STATE)):
yield 'expired2'
# Use a stable string value for matching.
def __repr__(self):
return 'StatefulDoFnWithTimerWithTypo2'
dofn = StatefulDoFnWithTimerWithTypo2()
with self.assertRaisesRegexp(
ValueError,
(r'The on_timer callback for TimerSpec\(expiry1\) is not the '
r'specified .on_expiry_1 method for DoFn '
r'StatefulDoFnWithTimerWithTypo2 \(perhaps it was overwritten\?\).')):
UserStateUtils.validate_stateful_dofn(dofn)
    # (3) Here, the user forgot to add an on_timer decorator for 'expiry2'
class StatefulDoFnWithTimerWithTypo3(DoFn):
BUFFER_STATE = BagStateSpec('buffer', BytesCoder())
EXPIRY_TIMER_1 = TimerSpec('expiry1', TimeDomain.WATERMARK)
EXPIRY_TIMER_2 = TimerSpec('expiry2', TimeDomain.WATERMARK)
def process(self, element,
timer1=DoFn.TimerParam(EXPIRY_TIMER_1),
timer2=DoFn.TimerParam(EXPIRY_TIMER_2)):
pass
@on_timer(EXPIRY_TIMER_1)
def on_expiry_1(self, buffer_state=DoFn.StateParam(BUFFER_STATE)):
yield 'expired1'
def on_expiry_2(self, buffer_state=DoFn.StateParam(BUFFER_STATE)):
yield 'expired2'
# Use a stable string value for matching.
def __repr__(self):
return 'StatefulDoFnWithTimerWithTypo3'
dofn = StatefulDoFnWithTimerWithTypo3()
with self.assertRaisesRegexp(
ValueError,
(r'DoFn StatefulDoFnWithTimerWithTypo3 has a TimerSpec without an '
r'associated on_timer callback: TimerSpec\(expiry2\).')):
UserStateUtils.validate_stateful_dofn(dofn)
if __name__ == '__main__':
unittest.main()
| 39.138686
| 84
| 0.713726
|
2cb1309ec70b2cf91425ac18412c1d65a1e91ab7
| 832
|
py
|
Python
|
python/linalg/blas.py
|
hmatuschek/linalg
|
b4b8337ba001b24572f13349cc5804a416bfe5a7
|
[
"MIT"
] | 1
|
2016-05-24T15:27:52.000Z
|
2016-05-24T15:27:52.000Z
|
python/linalg/blas.py
|
hmatuschek/linalg
|
b4b8337ba001b24572f13349cc5804a416bfe5a7
|
[
"MIT"
] | null | null | null |
python/linalg/blas.py
|
hmatuschek/linalg
|
b4b8337ba001b24572f13349cc5804a416bfe5a7
|
[
"MIT"
] | null | null | null |
import linalg._blas as _blas
import numpy as np
def dot(x,y):
""" Implements the some dot-product of two vectors x and y. """
assert (x.dtype == y.dtype)
if (np.float64 == x.dtype):
return _blas.ddot(x,y)
else:
raise TypeError("Can not call dot() with vector of type {0}".format(x.dtype));
def nrm2(x):
""" Implements the 2-norm of a vector. """
if (np.float64 == x.dtype):
return _blas.nrm2(x)
else:
raise TypeError("Can not call nrm2() with vector of type {0}".format(x.dtype));
def gemv(alpha, A, x, beta, y):
""" Implements general matrix-vector product so y' = alphaA*x + beta*y. """
assert (A.dtype == x.dtype)
assert (A.dtype == x.dtype)
if (np.float64 == x.dtype):
_blas.dgemv(alpha, A, x, beta, y)
else:
raise TypeError("Can not call gemv() with arrays of type {0}".format(x.dtype));
| 27.733333
| 81
| 0.646635
|
b0f38b73afa2592d3a9583f24b23319f544dcd6c
| 1,447
|
py
|
Python
|
emotion_classification/main.py
|
vijay4313/proxemo
|
98c4e2133047aa8519cc2f482b59565d9160e81a
|
[
"MIT"
] | 23
|
2020-08-18T17:31:54.000Z
|
2022-03-10T10:37:31.000Z
|
emotion_classification/main.py
|
vijay4313/proxemo
|
98c4e2133047aa8519cc2f482b59565d9160e81a
|
[
"MIT"
] | 3
|
2020-11-09T10:18:19.000Z
|
2021-10-31T21:34:37.000Z
|
emotion_classification/main.py
|
vijay4313/proxemo
|
98c4e2133047aa8519cc2f482b59565d9160e81a
|
[
"MIT"
] | 10
|
2020-08-15T16:46:22.000Z
|
2021-07-08T06:54:03.000Z
|
#!/usr/bin/env python
# Title :main.py
# Author :Venkatraman Narayanan, Bala Murali Manoghar, Vishnu Shashank Dorbala, Aniket Bera, Dinesh Manocha
# Copyright :"Copyright 2020, Proxemo project"
# Version :1.0
# License :"MIT"
# Maintainer :Venkatraman Narayanan, Bala Murali Manoghar
# Email :vnarayan@terpmail.umd.edu, bsaisudh@terpmail.umd.edu
#==============================================================================
from utils import yaml_parser
from runner.trainer import Trainer
import torch
import numpy as np
import os
import argparse
import sys
sys.path.append("../")
# python main.py --settings stgcn
# Load settings
def arg_parser():
"""CLI arg parser.
Returns:
[dict]: CLI args
"""
parser = argparse.ArgumentParser(description='Proxemo Runner')
parser.add_argument('--settings', type=str, default='infer', metavar='s',
help='config file for running the network.')
cli_args = parser.parse_args()
args = yaml_parser.yaml_parser(cli_args.settings)
return args
def main():
"""Main routine."""
args = arg_parser()
gen_args, model_args, data_args = args.values()
# Build model
model = Trainer(gen_args, data_args, model_args)
if gen_args['MODE'] == 'train':
model.train()
elif gen_args['MODE'] == 'test':
model.test()
if __name__ == '__main__':
main()
| 25.839286
| 116
| 0.612992
|
40bcd009a9ad9d3d73d43b028865e970e7b1a7a4
| 14,674
|
py
|
Python
|
electrum/gui/qt/__init__.py
|
akshaynexus/qtum-electrum
|
9fdda51bbc7e541b6f5743f16313d0b8b6123b48
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/__init__.py
|
akshaynexus/qtum-electrum
|
9fdda51bbc7e541b6f5743f16313d0b8b6123b48
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/__init__.py
|
akshaynexus/qtum-electrum
|
9fdda51bbc7e541b6f5743f16313d0b8b6123b48
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import signal
import sys
import traceback
import threading
from typing import Optional, TYPE_CHECKING
from .theme_helper import set_qtum_theme_if_needed
try:
import PyQt5
except Exception:
sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
import PyQt5.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugin import run_hook
from electrum.base_wizard import GoBack
from electrum.util import (UserCancelled, profiler,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
from .lightning_dialog import LightningDialog
from .watchtower_dialog import WatchtowerDialog
if TYPE_CHECKING:
from electrum.daemon import Daemon
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
@profiler
def __init__(self, config: 'SimpleConfig', daemon: 'Daemon', plugins: 'Plugins'):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum.png"))
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.network_dialog = None
self.lightning_dialog = None
self.watchtower_dialog = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.new_window_signal.connect(self.start_new_window)
set_qtum_theme_if_needed(self.config)
run_hook('init_qt', self)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
network = self.daemon.network
m.addAction(_("Network"), self.show_network_dialog)
if network.lngossip:
m.addAction(_("Lightning Network"), self.show_lightning_dialog)
if network.local_watchtower:
m.addAction(_("Local Watchtower"), self.show_watchtower_dialog)
for window in self.windows:
name = window.wallet.basename()
submenu = m.addMenu(name)
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum"), self.close)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
if self.network_dialog:
self.network_dialog.close()
if self.lightning_dialog:
self.lightning_dialog.close()
if self.watchtower_dialog:
self.watchtower_dialog.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_lightning_dialog(self):
if not self.lightning_dialog:
self.lightning_dialog = LightningDialog(self)
self.lightning_dialog.bring_to_top()
def show_watchtower_dialog(self):
if not self.watchtower_dialog:
self.watchtower_dialog = WatchtowerDialog(self)
self.watchtower_dialog.bring_to_top()
def show_network_dialog(self):
if self.network_dialog:
self.network_dialog.on_update()
self.network_dialog.show()
self.network_dialog.raise_()
return
self.network_dialog = NetworkDialog(self.daemon.network, self.config,
self.network_updated_signal_obj)
self.network_dialog.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + repr(e))
# if app is starting, still let wizard to appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + repr(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + repr(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage = wizard.create_storage(path)
else:
wizard.run_upgrades(storage)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or storage.get_action():
return
wallet = Wallet(storage, config=self.config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
self.logger.exception('')
return
self.timer.start()
path = self.config.get_wallet_path(use_gui_last_wallet=True)
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
def quit_after_last_window():
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
if self.config.get('persist_daemon'):
return
self.app.quit()
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(quit_after_last_window)
def clean_up():
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
| 39.026596
| 112
| 0.634251
|
9985acbbbab2a160874370678975fc8b2bf77e58
| 224
|
py
|
Python
|
dsql/api/models/__init__.py
|
gustavwilliam/discordSQL
|
95ba62dd9b9d7bffa1e8c2a9fc60d35c916b7b45
|
[
"MIT"
] | 4
|
2021-07-08T12:24:39.000Z
|
2022-03-03T02:51:18.000Z
|
dsql/api/models/__init__.py
|
gustavwilliam/discordSQL
|
95ba62dd9b9d7bffa1e8c2a9fc60d35c916b7b45
|
[
"MIT"
] | 1
|
2021-12-28T00:13:33.000Z
|
2021-12-28T00:13:33.000Z
|
dsql/api/models/__init__.py
|
gustavwilliam/discordSQL
|
95ba62dd9b9d7bffa1e8c2a9fc60d35c916b7b45
|
[
"MIT"
] | null | null | null |
from dsql.api.models.snowflake import Snowflake
from dsql.api.models.component import Component
from dsql.api.models.database import Database
from dsql.api.models.table import Table
from dsql.api.models.column import Column
| 37.333333
| 47
| 0.84375
|
e37bc04bd81e51973229e72b73e57214172c8991
| 4,554
|
py
|
Python
|
pydysofu/fuzz_weaver.py
|
probablytom/automata_with_variance
|
36aabfe76313ecb751d5a33152f7277813ab4afc
|
[
"Apache-2.0"
] | null | null | null |
pydysofu/fuzz_weaver.py
|
probablytom/automata_with_variance
|
36aabfe76313ecb751d5a33152f7277813ab4afc
|
[
"Apache-2.0"
] | null | null | null |
pydysofu/fuzz_weaver.py
|
probablytom/automata_with_variance
|
36aabfe76313ecb751d5a33152f7277813ab4afc
|
[
"Apache-2.0"
] | null | null | null |
"""
Core fuzzing functionality.
@author twsswt
"""
import ast
import copy
import inspect
from core_fuzzers import identity
from inspect import getmembers
from workflow_transformer import WorkflowTransformer
_reference_syntax_trees = dict()
_reference_get_attributes = dict()
def get_reference_syntax_tree(func):
if func not in _reference_syntax_trees:
func_source_lines = inspect.getsourcelines(func)[0]
global_indentation = len(func_source_lines[0]) - len(func_source_lines[0].strip())
for i in range(len(func_source_lines)):
func_source_lines[i] = func_source_lines[i][global_indentation - 1:]
func_source = ''.join(func_source_lines)
_reference_syntax_trees[func] = ast.parse(func_source)
return _reference_syntax_trees[func]
def fuzz_function(reference_function, fuzzer=identity, context=None):
reference_syntax_tree = get_reference_syntax_tree(reference_function)
fuzzed_syntax_tree = copy.deepcopy(reference_syntax_tree)
workflow_transformer = WorkflowTransformer(fuzzer=fuzzer, context=context)
workflow_transformer.visit(fuzzed_syntax_tree)
# Compile the newly mutated function into a module, extract the mutated function code object and replace the
# reference function's code object for this call.
compiled_module = compile(fuzzed_syntax_tree, inspect.getsourcefile(reference_function), 'exec')
reference_function.func_code = compiled_module.co_consts[0]
def fuzz_clazz(clazz, advice):
"""
Applies fuzzers specified in the supplied advice dictionary to methods in the supplied class.
Fuzzing is applied dynamically at runtime by intercepting invocations of __getattribute__ on target objects.
The method requested by the __getattribute__ call is fuzzed using the fuzzer specified in the supplied advice
dictionary (which maps method references to fuzzers)before returning it to the requester. If no fuzzer is specified
for a function then the identity fuzzer is applied in case the method has been previously fuzzed.
A fuzzer value may itself be a dictionary of object filter->fuzzer mappings. In this case, the dictionary is
searched for a filter that matches the target (self) object specified in the __getattribute__ call.
:param clazz : the class to fuzz.
:param advice : the dictionary of method reference->fuzzer mappings to apply for the class.
"""
if clazz not in _reference_get_attributes:
_reference_get_attributes[clazz] = clazz.__getattribute__
def __fuzzed_getattribute__(self, item):
attribute = object.__getattribute__(self, item)
if item[0:2] == '__':
return attribute
elif inspect.ismethod(attribute):
def wrap(*args, **kwargs):
reference_function = attribute.im_func
# Ensure that advice key is unbound method for instance methods.
advice_key = getattr(attribute.im_class, attribute.func_name)
fuzzer = advice.get(advice_key, identity)
fuzz_function(reference_function, fuzzer, self)
# Execute the mutated method.
return reference_function(self, *args, **kwargs)
wrap.func_name = attribute.func_name
return wrap
elif inspect.isfunction(attribute):
def wrap(*args, **kwargs):
reference_function = attribute
advice_key = reference_function
fuzzer = advice.get(advice_key, identity)
fuzz_function(reference_function, fuzzer)
# Execute the mutated function.
return reference_function(*args, **kwargs)
return wrap
else:
return attribute
clazz.__getattribute__ = __fuzzed_getattribute__
def defuzz_class(clazz):
if clazz in _reference_get_attributes:
clazz.__getattribute__ = _reference_get_attributes[clazz]
def defuzz_all_classes():
for clazz in _reference_get_attributes.keys():
defuzz_class(clazz)
def fuzz_module(mod, advice):
"""
Applies fuzzers specified in the supplied advice dictionary to methods in supplied module. All member classes and
functions are inspected in turn, with the specified advice being applied to each.
:param mod : the module to fuzz.
:param advice : the dictionary of method->fuzzer mappings to apply.
"""
for _, member in getmembers(mod):
if inspect.isclass(member):
fuzz_clazz(member, advice)
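# Usage sketch (illustrative, not part of the original module): advice maps
# unbound method references to fuzzers, as described in the fuzz_clazz docstring.
# The Worker class below is made up; identity is the no-op fuzzer imported above.
#
# class Worker(object):
#     def step(self):
#         return 'did work'
#
# fuzz_clazz(Worker, {Worker.step: identity})
# Worker().step()       # the call is intercepted and re-woven before running
# defuzz_class(Worker)  # restore the original __getattribute__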
| 34.5
| 119
| 0.711243
|
0a423d57e4be63a93037ea31ec7196695323ea9f
| 7,253
|
py
|
Python
|
dnnlib/tflib/custom_ops.py
|
cvcomar/gansformer
|
ca698df4da32584a22f1b6a3b7cbdbf4bb0537be
|
[
"MIT"
] | null | null | null |
dnnlib/tflib/custom_ops.py
|
cvcomar/gansformer
|
ca698df4da32584a22f1b6a3b7cbdbf4bb0537be
|
[
"MIT"
] | null | null | null |
dnnlib/tflib/custom_ops.py
|
cvcomar/gansformer
|
ca698df4da32584a22f1b6a3b7cbdbf4bb0537be
|
[
"MIT"
] | null | null | null |
# TensorFlow custom ops builder. Compiles new operations.
import os
import re
import uuid
import hashlib
import tempfile
import shutil
import tensorflow as tf
from tensorflow.python.client import device_lib
# Global options
# ----------------------------------------------------------------------------
cuda_cache_path = os.path.join(os.path.dirname(__file__), "_cudacache")
cuda_cache_version_tag = "v1"
# Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
do_not_hash_included_headers = False
# Print status messages to stdout
verbose = True
compiler_bindir_search_path = [
"C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64",
"C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64",
"C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin",
]
# Internal helper functions
# ----------------------------------------------------------------------------
def _find_compiler_bindir():
for compiler_path in compiler_bindir_search_path:
if os.path.isdir(compiler_path):
return compiler_path
return None
def _get_compute_cap(device):
caps_str = device.physical_device_desc
    m = re.search('compute capability: (\\d+)\\.(\\d+)', caps_str)
major = m.group(1)
minor = m.group(2)
return (major, minor)
def _get_cuda_gpu_arch_string():
gpus = [x for x in device_lib.list_local_devices() if x.device_type == "GPU"]
if len(gpus) == 0:
return "sm_70"
# raise RuntimeError("No GPU devices found")
(major, minor) = _get_compute_cap(gpus[0])
return "sm_%s%s" % (major, minor)
def _run_cmd(cmd):
# print(cmd)
with os.popen(cmd) as pipe:
output = pipe.read()
status = pipe.close()
if status is not None:
raise RuntimeError("NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s" % (cmd, output))
def _prepare_nvcc_cli(opts):
cmd = 'nvcc ' + opts.strip()
cmd += ' --disable-warnings'
cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
# Require that _find_compiler_bindir succeeds on Windows
# Allow nvcc to use whatever is the default on Linux
if os.name == "nt":
raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
else:
cmd += ' --compiler-bindir "%s"' % compiler_bindir
cmd += ' 2>&1'
return cmd
# Main entry point
# ----------------------------------------------------------------------------
_plugin_cache = dict()
def get_plugin(cuda_file):
cuda_file_base = os.path.basename(cuda_file)
cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
# Already in cache?
if cuda_file in _plugin_cache:
return _plugin_cache[cuda_file]
# Setup plugin
if verbose:
print("Setting up TensorFlow plugin '%s': " % cuda_file_base, end = "", flush = True)
try:
# Hash CUDA source
md5 = hashlib.md5()
with open(cuda_file, "rb") as f:
md5.update(f.read())
md5.update(b"\n")
# Compile if not already compiled
tf_ver = float(".".join(tf.__version__.split(".")[:-1]))
bin_file_ext = '.dll' if os.name == 'nt' else '.so'
bin_file = os.path.join(cuda_cache_path, cuda_file_name + "_{}_".format(tf_ver) + bin_file_ext) # + '_' + md5.hexdigest()
if not os.path.isfile(bin_file):
# Hash headers included by the CUDA code by running it through the preprocessor
if not do_not_hash_included_headers:
if verbose:
print("Preprocessing... ", end = "", flush = True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
_run_cmd(_prepare_nvcc_cli('"%s" --preprocess -std=c++11 -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
with open(tmp_file, 'rb') as f:
bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
for ln in f:
if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
ln = ln.replace(bad_file_str, good_file_str)
md5.update(ln)
md5.update(b'\n')
# Select compiler options
compile_opts = ''
if os.name == 'nt':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
elif os.name == 'posix':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=%s\'' % (0)
else:
assert False # not Windows or Linux
compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
compile_opts += ' --use_fast_math'
nvcc_cmd = _prepare_nvcc_cli(compile_opts)
# Hash build configuration
md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
# if not os.path.isfile(bin_file):
if verbose:
print("Compiling... ", end = "", flush = True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
_run_cmd(nvcc_cmd + ' "%s" --shared -std=c++11 -DNDEBUG -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
os.makedirs(cuda_cache_path, exist_ok = True)
intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
shutil.copyfile(tmp_file, intermediate_file)
os.rename(intermediate_file, bin_file) # atomic
# Load
if verbose:
print("Loading... ", end = "", flush = True)
plugin = tf.load_op_library(bin_file)
# Add to cache
_plugin_cache[cuda_file] = plugin
if verbose:
print("Done.", flush = True)
return plugin
except:
if verbose:
print("Failed!", flush = True)
raise
| 43.172619
| 153
| 0.587067
|
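A brief usage sketch for the get_plugin() compiler/loader above. It is illustrative only: the .cu filename is a hypothetical placeholder, and the sketch assumes it runs inside the same module, where the settings the function references (verbose, cuda_cache_path, do_not_hash_included_headers, cuda_cache_version_tag, _find_compiler_bindir) are already defined.
# Hypothetical CUDA source file defining one or more TensorFlow ops.
plugin = get_plugin("my_custom_op.cu")   # compiles via nvcc (or reuses the cached binary) and loads it
print([name for name in dir(plugin) if not name.startswith("_")])  # ops exposed by the loaded library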
2749c4fb8a814492f411eb03fce9ded1289aba9b
| 1,350
|
py
|
Python
|
datasets/NewsgroupsArr.py
|
rist-ro/argo
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
[
"MIT"
] | 4
|
2020-12-07T19:13:13.000Z
|
2022-01-30T18:52:18.000Z
|
datasets/NewsgroupsArr.py
|
rist-ro/argo
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
[
"MIT"
] | 12
|
2020-09-25T22:41:28.000Z
|
2022-02-09T23:46:34.000Z
|
datasets/NewsgroupsArr.py
|
rist-ro/argo
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
[
"MIT"
] | 2
|
2021-03-02T18:31:04.000Z
|
2021-03-02T21:56:43.000Z
|
import os
os.environ['NUMEXPR_MAX_THREADS'] = '20'
from word_embedding.test.core.load_20newsgroup import read_20newsgroup
from .AlphaDatasetArr import AlphaDatasetArr
NPROCS = 4
class NewsgroupsArr(AlphaDatasetArr):
default_params = {
'split_seed' : 42,
}
def __init__(self, params):
super().__init__(NewsgroupsArr.process_params(params))
random_state = params.get('split_seed', 42)
train_data, train_target, validation_data, validation_target, test_data, test_target = read_20newsgroup(
ratio_datasets=[0.7, 0.15, 0.15], random_state=random_state)
self._train_set_x, self._train_set_y = self._preprocess_arrays(train_data, train_target)
self._validation_set_x, self._validation_set_y = self._preprocess_arrays(validation_data, validation_target)
self._test_set_x, self._test_set_y = self._preprocess_arrays(test_data, test_target)
self._set_shapes(n_samples_train = self._train_set_x.shape[0], n_labels = 20)
def dataset_id(self, params):
"""
        This method interprets the parameters and generates an id
"""
NewsgroupsArr.check_params_impl(params)
_id = '20newsgroupsArr'
_id += AlphaDatasetArr.dataset_id(self, params)
_id += '-s{:d}'.format(params['split_seed'])
return _id
| 27.55102
| 116
| 0.702963
|
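A minimal usage sketch for the NewsgroupsArr class above, assuming the package layout shown in the record path (datasets/NewsgroupsArr.py); any additional params keys accepted by the AlphaDatasetArr base class are not shown here.
from datasets.NewsgroupsArr import NewsgroupsArr

params = {"split_seed": 42}          # the only key this subclass documents itself
dataset = NewsgroupsArr(params)      # loads 20 Newsgroups and splits it 70/15/15
print(dataset.dataset_id(params))    # identifier string ending in "-s42"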
007c94d0441674c13f741b40ff733d72957cbf00
| 5,920
|
py
|
Python
|
gnuradio-3.7.13.4/gr-qtgui/apps/plot_time_base.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | 1
|
2021-03-09T07:32:37.000Z
|
2021-03-09T07:32:37.000Z
|
gnuradio-3.7.13.4/gr-qtgui/apps/plot_time_base.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | null | null | null |
gnuradio-3.7.13.4/gr-qtgui/apps/plot_time_base.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os, sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
print "Error: Program requires PyQt4 and gr-qtgui."
sys.exit(1)
try:
import scipy
except ImportError:
print "Error: Scipy required (www.scipy.org)."
sys.exit(1)
try:
from gnuradio.qtgui.plot_form import *
from gnuradio.qtgui.plot_base import *
except ImportError:
from plot_form import *
from plot_base import *
class plot_base(gr.top_block):
def __init__(self, filelist, samp_rate, start,
nsamples, max_nsamples,
auto_scale):
gr.top_block.__init__(self)
self._filelist = filelist
self._samp_rate = samp_rate
self._center_freq = 0
self._start = start
self._max_nsamps = max_nsamples
self._nsigs = len(self._filelist)
self._auto_scale = auto_scale
self._nsamps = nsamples
self._is_setup = False
self._y_min = -20
self._y_max = 20
self._y_range = 2
self._y_value = 1
self.gui_y_axis = None
self.qapp = QtGui.QApplication(sys.argv)
def setup(self):
self.skip = blocks.skiphead(self.dsize, self._start)
n = 0
self.srcs = list()
self._data_min = sys.maxint
self._data_max = -sys.maxint - 1
for f in self._filelist:
data,_min,_max = self.read_samples(f, self._start, self._nsamps)
if(_min < self._data_min):
self._data_min = _min
if(_max > self._data_max):
self._data_max = _max
self.srcs.append(self.src_type(data))
# Set default labels based on file names
fname = f.split("/")[-1]
if(type(self.gui_snk) == qtgui.time_sink_c_sptr):
self.gui_snk.set_line_label(n, "Re{{{0}}}".format(fname))
self.gui_snk.set_line_label(n+1, "Im{{{0}}}".format(fname))
n += 2
else:
self.gui_snk.set_line_label(n, "{0}".format(fname))
n += 1
self.connect(self.srcs[0], self.skip)
self.connect(self.skip, (self.gui_snk, 0))
for i,s in enumerate(self.srcs[1:]):
self.connect(s, (self.gui_snk, i+1))
self.gui_snk.set_update_time(0)
self.gui_snk.enable_menu(False)
self.auto_scale(self._auto_scale)
# Get Python Qt references
pyQt = self.gui_snk.pyqwidget()
self.pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
self._is_setup = True
def is_setup(self):
return self._is_setup
def set_y_axis(self, y_min, y_max):
self.gui_snk.set_y_axis(y_min, y_max)
return y_min, y_max
def get_gui(self):
if(self.is_setup()):
return self.pyWin
else:
return None
def reset(self, newstart, newnsamps):
self.stop()
self.wait()
self._start = newstart
self._data_min = sys.maxint
self._data_max = -sys.maxint - 1
for s,f in zip(self.srcs, self._filelist):
data,_min,_max = self.read_samples(f, self._start, newnsamps)
if(_min < self._data_min):
self._data_min = _min
if(_max > self._data_max):
self._data_max = _max
s.set_data(data)
if(len(data) < newnsamps):
newnsamps = len(data)
self.auto_scale(self._auto_scale)
self._nsamps = newnsamps
self.gui_snk.set_nsamps(self._nsamps)
self.start()
def auto_scale(self, state):
if(state > 0):
self.gui_snk.set_y_axis(self._data_min, self._data_max)
self._auto_scale = True
self._y_value = self._data_max
self._y_range = self._data_max - self._data_min
self._y_min = 10*self._data_min
self._y_max = 10*self._data_max
if(self.gui_y_axis):
self.gui_y_axis(self._data_min, self._data_max)
else:
self._auto_scale = False
def setup_options(desc):
parser = OptionParser(option_class=eng_option, description=desc,
conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=1000000,
help="Set the number of samples to display [default=%default]")
parser.add_option("-S", "--start", type="int", default=0,
help="Starting sample number [default=%default]")
parser.add_option("-r", "--sample-rate", type="eng_float", default=1.0,
help="Set the sample rate of the signal [default=%default]")
parser.add_option("", "--no-auto-scale", action="store_true", default=False,
help="Do not auto-scale the plot [default=%default]")
(options,args) = parser.parse_args()
if(len(args) < 1):
parser.print_help()
sys.exit(0)
return (options,args)
| 32
| 85
| 0.609291
|
ab7c70b9581b8d9b3dd5b8dfa3ddecfb2981c82e
| 5,201
|
py
|
Python
|
voice_similarity/analyzer.py
|
itanium-R/rcc_winter_hackathon_2020
|
cecd43d8b89d2d656e3b11ca5bdfb35965451d37
|
[
"MIT"
] | null | null | null |
voice_similarity/analyzer.py
|
itanium-R/rcc_winter_hackathon_2020
|
cecd43d8b89d2d656e3b11ca5bdfb35965451d37
|
[
"MIT"
] | null | null | null |
voice_similarity/analyzer.py
|
itanium-R/rcc_winter_hackathon_2020
|
cecd43d8b89d2d656e3b11ca5bdfb35965451d37
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pyaudio, wave
import os, sys, gc, time, threading, math
import numpy as np
from lib.record import *
from engine import *
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
class Detecation(Recording):
def __init__(self):
        ## -----*----- Constructor -----*----- ##
super().__init__()
        # Unlike the parent class stream (Float32, twice the sample rate)
self.f_stream = self._pa.open(
format=pyaudio.paFloat32,
channels=self.settings['channels'],
rate=self.settings['rate'] * 2,
input=True,
output=False,
frames_per_buffer=self.settings['chunk']
)
        # Counters for rising / falling edge detections
self.cnt_edge = {'up': 0, 'down': 0}
        # Stores state such as volume, threshold and running average
self.state = {'amp': 0, 'total': 0, 'cnt': 0, 'border': 9999, 'average': 0}
        # Console output
self.console = Console('./config/outfmt.txt')
self.color = 90
def start(self):
        ## -----*----- Start detection -----*----- ##
time.sleep(self.settings['past_second'])
        # Start the sub-thread that updates the threshold
self.thread = threading.Thread(target=self.update_border)
self.thread.start()
self.pastTime = time.time()
while not self.is_exit:
try:
if time.time() - self.pastTime > 0.5:
self.reset_state()
self.state['cnt'] += 1
self.detection()
sys.stdout.flush()
except KeyboardInterrupt:
os.system('clear')
self.is_exit = True
def detection(self):
        ## -----*----- Rising / falling edge detection -----*----- ##
voiceData = np.fromstring(self.f_stream.read(self.settings['chunk'], exception_on_overflow=False), np.float32)
voiceData *= np.hanning(self.settings['chunk'])
        # Amplitude spectrum (0-8000 Hz)
x = np.fft.fft(voiceData)
        # Power spectrum
amplitudeSpectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in x]
        # Band-pass filter (100-5000 Hz)
amplitudeSpectrum = amplitudeSpectrum[
int((self.settings['chunk'] / (self.settings['rate'] * 2)) * 100):
int((self.settings['chunk'] / (self.settings['rate'] * 2)) * 5000)]
        # Compute the amplitude value and running average
self.state['amp'] = sum(amplitudeSpectrum)
self.state['total'] += self.state['amp']
self.state['average'] = self.state['total'] / self.state['cnt']
        # Console output
self.console.draw(int(self.state['average']), int(self.state['amp']), int(self.state['border']),
'\033[{0}m録音中\033[0m'.format(self.color), *self.meter(), str(self.similarity) + '%')
        # Rising edge detection
if self.up_edge() and self.record_end.is_set():
self.record_start.set()
self.record_end.clear()
self.color = 32
self.state['border'] = self.state['average']
if self.down_edge() and (not self.record_end.is_set()):
self.record_start.clear()
self.reset_state()
def up_edge(self):
        ## -----*----- Rising edge detection -----*----- ##
if not self.record_start.is_set():
if self.state['amp'] >= self.state['border']:
self.cnt_edge['up'] += 1
if self.cnt_edge['up'] > 5:
return True
return False
def down_edge(self):
        ## -----*----- Falling edge detection -----*----- ##
if self.record_start.is_set():
if self.state['average'] <= self.state['border']:
self.cnt_edge['down'] += 1
if self.cnt_edge['down'] > 10:
self.cnt_edge['up'] = self.cnt_edge['down'] = 0
return True
return False
def reset_state(self):
        ## -----*----- Reset state -----*----- ##
self.state['total'] = self.state['average'] * 15
self.state['cnt'] = 15
if self.state['average'] >= self.state['amp']:
self.cnt_edge['up'] = 0
self.color = 90
self.pastTime = time.time()
def update_border(self):
        ## -----*----- Update threshold -----*----- ##
offset = range(50, 201, 10)
while not self.is_exit:
time.sleep(0.2)
if self.cnt_edge['up'] < 3 and not self.record_start.is_set():
if int(self.state['average'] / 20) > len(offset) - 1:
i = len(offset) - 1
else:
i = int(self.state['average'] / 20)
self.state['border'] = pow(10, 1.13) * pow(self.state['average'], 0.72)
def meter(self):
        ## -----*----- Build volume meter -----*----- ##
meter = [''] * 3
keys = ['average', 'amp', 'border']
for i in range(3):
for j in range(int(self.state[keys[i]] / 20 + 3)):
meter[i] += '■'
if self.record_start.is_set():
if self.state['average'] >= self.state['border']:
meter[0] = '\033[94m' + meter[0] + '\033[0m'
elif self.state['amp'] >= self.state['border']:
meter[1] = '\033[94m' + meter[1] + '\033[0m'
return meter
if __name__ == '__main__':
detection = Detecation()
detection.start()
| 36.370629
| 118
| 0.502403
|
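The adaptive threshold in update_border() above is border = 10**1.13 * average**0.72. A standalone sketch of that curve follows, with purely illustrative input values:
# Shows how the detection threshold grows sub-linearly with the running average volume.
for average in (10, 50, 100, 500, 1000):
    border = pow(10, 1.13) * pow(average, 0.72)
    print("average=%6d  ->  border=%8.1f" % (average, border))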
7c6f40233db706cb6f638c1d908279877c518fd2
| 2,439
|
py
|
Python
|
pmgr/param.py
|
mcb64/pmgr
|
70b372385ed8e67b30ecaa20e1ae356c85222256
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
pmgr/param.py
|
mcb64/pmgr
|
70b372385ed8e67b30ecaa20e1ae356c85222256
|
[
"BSD-3-Clause-LBNL"
] | 13
|
2018-05-10T00:06:48.000Z
|
2021-07-14T21:47:20.000Z
|
pmgr/param.py
|
mcb64/pmgr
|
70b372385ed8e67b30ecaa20e1ae356c85222256
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2018-07-31T20:11:09.000Z
|
2020-08-17T20:14:28.000Z
|
from PyQt5 import QtCore, QtGui
import pwd, os
AUTH_FILE = "/reg/g/pcds/pyps/config/%s/pmgr.auth"
params = None
def equal(v1, v2):
try:
if type(v1) == float:
# I hate floating point. OK, we need to be "close", but if we are *at* zero
# the "close" test fails!
return v1 == v2 or abs(v1 - v2) < (abs(v1) + abs(v2)) * 1e-12
else:
return v1 == v2
except:
return False
class param_structure(object):
def __init__(self):
self.myuid = pwd.getpwuid(os.getuid())[0]
self.user = None
self.almond = QtGui.QColor(255,235,205)
self.almond.name = "almond"
self.white = QtGui.QColor(255,255,255)
self.white.name = "white"
self.gray = QtGui.QColor(160,160,160)
self.gray.name = "gray"
self.ltgray = QtGui.QColor(224,224,224)
self.ltgray.name = "ltgray"
self.ltblue = QtGui.QColor(0, 255,255)
self.ltblue.name = "ltblue"
self.blue = QtGui.QColor(QtCore.Qt.blue)
self.blue.name = "blue"
self.red = QtGui.QColor(QtCore.Qt.red)
self.red.name = "red"
self.black = QtGui.QColor(QtCore.Qt.black)
self.black.name = "black"
self.purple = QtGui.QColor(204, 0, 102)
self.purple.name = "purple"
self.cfgdialog = None
self.colusedialog = None
self.colsavedialog = None
self.deriveddialog = None
self.confirmdialog = None
self.settings = ("SLAC", "ParamMgr")
self.debug = False
self.applyOK = False
self.ui = None
self.objmodel = None
self.cfgmodel = None
self.db = None
self.pobj = None
self.hutch = None
self.table = None
self.PROTECTED = 0
self.MANUAL = 1
self.AUTO = 2
self.catenum = ["Protected", "Manual", "Auto"] # Database names.
self.setCatEnum(["Protected", "Manual"]) # Displayed names.
def setCatEnum(self, l):
self.catenum2 = l
if self.ui != None:
self.ui.actionProtected.setText("Show " + l[0])
self.ui.actionManual.setText("Show " + l[1])
def setTable(self, v):
self.table = v
def setHutch(self, v):
self.hutch = v
lines = open(AUTH_FILE % v).readlines()
self.auth_users = [l.strip() for l in lines]
| 31.675325
| 88
| 0.547765
|
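A quick illustration of the tolerant float comparison implemented by equal() above; run it with that function in scope (note the module itself also imports PyQt5).
print(equal(0.1 + 0.2, 0.3))       # True: relative difference is far below the 1e-12 tolerance
print(equal(0.0, 0.0))             # True: exact equality covers the at-zero case the comment mentions
print(equal(1.0, 1.0000001))       # False: relative difference exceeds the tolerance
print(equal("axis", "axis"))       # True: non-floats fall back to plain ==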
1abc09d0e7c352b2ef3ac9113e8bc73f8cd14662
| 1,368
|
py
|
Python
|
chembl_webservices/resources/source.py
|
chembl/chembl_new_webservices
|
59fb52665cc1fc09a495d9a8c118687ddd0ad781
|
[
"Apache-2.0"
] | 16
|
2015-02-20T15:54:56.000Z
|
2022-03-04T15:33:11.000Z
|
chembl_webservices/resources/source.py
|
chembl/chembl_webservices_2
|
59fb52665cc1fc09a495d9a8c118687ddd0ad781
|
[
"Apache-2.0"
] | 144
|
2015-02-18T22:14:18.000Z
|
2022-03-07T13:01:20.000Z
|
chembl_webservices/resources/source.py
|
chembl/chembl_new_webservices
|
59fb52665cc1fc09a495d9a8c118687ddd0ad781
|
[
"Apache-2.0"
] | 5
|
2015-03-03T12:58:29.000Z
|
2020-11-03T21:16:20.000Z
|
__author__ = 'mnowotka'
from chembl_webservices.core.utils import NUMBER_FILTERS, CHAR_FILTERS
from chembl_webservices.core.resource import ChemblModelResource
from chembl_webservices.core.meta import ChemblResourceMeta
from chembl_webservices.core.serialization import ChEMBLApiSerializer
try:
from chembl_compatibility.models import Source
except ImportError:
from chembl_core_model.models import Source
from chembl_webservices.core.fields import monkeypatch_tastypie_field
monkeypatch_tastypie_field()
# ----------------------------------------------------------------------------------------------------------------------
class SourceResource(ChemblModelResource):
class Meta(ChemblResourceMeta):
queryset = Source.objects.all()
resource_name = 'source'
collection_name = 'sources'
serializer = ChEMBLApiSerializer(resource_name, {collection_name: resource_name})
filtering = {
'src_description': CHAR_FILTERS,
'src_id': NUMBER_FILTERS,
'src_short_name': CHAR_FILTERS,
}
ordering = [field for field in filtering.keys() if not ('comment' in field or 'description' in field)]
excludes = ['default_doc_id', 'default_loadtype']
# ----------------------------------------------------------------------------------------------------------------------
| 40.235294
| 120
| 0.616959
|
95dc1f888eb490472b7d76b3037663652fd74a66
| 697
|
py
|
Python
|
core/urls.py
|
GDSC-BUK/sc-22.forum-service
|
2202e61003f7b23f2871465e4a110f9083595f5b
|
[
"MIT"
] | null | null | null |
core/urls.py
|
GDSC-BUK/sc-22.forum-service
|
2202e61003f7b23f2871465e4a110f9083595f5b
|
[
"MIT"
] | null | null | null |
core/urls.py
|
GDSC-BUK/sc-22.forum-service
|
2202e61003f7b23f2871465e4a110f9083595f5b
|
[
"MIT"
] | null | null | null |
from django.urls import path
from core.api import (
ReplyDiscussionAPI,
RetrieveUpdateDestroyReplyAPI,
RetriveUpdateDestroyDiscussionAPI,
StartDiscussionAPI,
)
app_name = "core"
urlpatterns = [
path("discussion/", StartDiscussionAPI.as_view(), name="start_discussion"),
path(
"discussion/<uuid:discussion_id>/",
RetriveUpdateDestroyDiscussionAPI.as_view(),
name="rud_discussion",
),
path(
"reply/<uuid:discussion_id>/new/",
ReplyDiscussionAPI.as_view(),
name="reply_discussion",
),
path(
"reply/<uuid:reply_id>/",
RetrieveUpdateDestroyReplyAPI.as_view(),
name="rud_reply",
),
]
| 23.233333
| 79
| 0.651363
|
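A hedged sketch of reversing the named routes declared above from application code. It assumes a configured Django project that includes core.urls; the printed paths are relative to wherever the project mounts that URLconf, and the UUIDs are arbitrary.
from uuid import uuid4
from django.urls import reverse

discussion_id = uuid4()
print(reverse("core:start_discussion"))
print(reverse("core:rud_discussion", kwargs={"discussion_id": discussion_id}))
print(reverse("core:reply_discussion", kwargs={"discussion_id": discussion_id}))
print(reverse("core:rud_reply", kwargs={"reply_id": uuid4()}))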
cb379774c3e3951844d9879b11fb80c58ec330aa
| 6,215
|
py
|
Python
|
data_utils.py
|
ggsonic/mellotron
|
8e93e4c814d984f79e758a8ae57e86d6503eaceb
|
[
"BSD-3-Clause"
] | null | null | null |
data_utils.py
|
ggsonic/mellotron
|
8e93e4c814d984f79e758a8ae57e86d6503eaceb
|
[
"BSD-3-Clause"
] | null | null | null |
data_utils.py
|
ggsonic/mellotron
|
8e93e4c814d984f79e758a8ae57e86d6503eaceb
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import os
import re
import numpy as np
import torch
import torch.utils.data
import librosa
import layers
from utils import load_wav_to_torch, load_filepaths_and_text
from text import text_to_sequence, cmudict
from yin import compute_yin
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio, text and speaker ids
2) normalizes text and converts them to sequences of one-hot vectors
3) computes mel-spectrograms and f0s from audio files.
"""
def __init__(self, audiopaths_and_text, hparams, speaker_ids=None):
self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
self.text_cleaners = hparams.text_cleaners
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.stft = layers.TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
hparams.mel_fmax)
self.sampling_rate = hparams.sampling_rate
self.filter_length = hparams.filter_length
self.hop_length = hparams.hop_length
self.f0_min = hparams.f0_min
self.f0_max = hparams.f0_max
self.harm_thresh = hparams.harm_thresh
self.p_arpabet = hparams.p_arpabet
self.cmudict = None
if hparams.cmudict_path is not None:
self.cmudict = cmudict.CMUDict(hparams.cmudict_path)
self.speaker_ids = speaker_ids
if speaker_ids is None:
self.speaker_ids = self.create_speaker_lookup_table(self.audiopaths_and_text)
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def create_speaker_lookup_table(self, audiopaths_and_text):
speaker_ids = np.sort(np.unique([x[2] for x in audiopaths_and_text]))
d = {int(speaker_ids[i]): i for i in range(len(speaker_ids))}
return d
def get_f0(self, audio, sampling_rate=22050, frame_length=1024,
hop_length=256, f0_min=100, f0_max=300, harm_thresh=0.1):
f0, harmonic_rates, argmins, times = compute_yin(
audio, sampling_rate, frame_length, hop_length, f0_min, f0_max,
harm_thresh)
pad = int((frame_length / hop_length) / 2)
f0 = [0.0] * pad + f0 + [0.0] * pad
f0 = np.array(f0, dtype=np.float32)
return f0
def get_data(self, audiopath_and_text):
audiopath, text, speaker = audiopath_and_text
text = self.get_text(text)
mel, f0 = self.get_mel_and_f0(audiopath)
speaker_id = self.get_speaker_id(speaker)
return (text, mel, speaker_id, f0)
def get_speaker_id(self, speaker_id):
return torch.IntTensor([self.speaker_ids[int(speaker_id)]])
def get_mel_and_f0(self, filepath):
audio, sampling_rate = load_wav_to_torch(filepath)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
f0 = self.get_f0(audio.cpu().numpy(), self.sampling_rate,
self.filter_length, self.hop_length, self.f0_min,
self.f0_max, self.harm_thresh)
f0 = torch.from_numpy(f0)[None]
f0 = f0[:, :melspec.size(1)]
return melspec, f0
def get_text(self, text):
text_norm = torch.IntTensor(
text_to_sequence(text, self.text_cleaners, self.cmudict))
return text_norm
def __getitem__(self, index):
return self.get_data(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded, gate padded and speaker ids
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
speaker_ids = torch.LongTensor(len(batch))
f0_padded = torch.FloatTensor(len(batch), 1, max_target_len)
f0_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
speaker_ids[i] = batch[ids_sorted_decreasing[i]][2]
f0 = batch[ids_sorted_decreasing[i]][3]
f0_padded[i, :, :f0.size(1)] = f0
model_inputs = (text_padded, input_lengths, mel_padded, gate_padded,
output_lengths, speaker_ids, f0_padded)
return model_inputs
| 39.08805
| 94
| 0.654224
|
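A sketch of how TextMelLoader and TextMelCollate are typically wired into a PyTorch DataLoader. The hparams object and the filelist path are assumed to come from the surrounding project and are placeholders here.
from torch.utils.data import DataLoader

trainset = TextMelLoader("filelists/train_filelist.txt", hparams)   # path is a placeholder
collate_fn = TextMelCollate(hparams.n_frames_per_step)
train_loader = DataLoader(trainset, batch_size=hparams.batch_size, shuffle=True,
                          collate_fn=collate_fn, drop_last=True)

# Each batch follows the tuple order returned by TextMelCollate.__call__ above.
text_padded, input_lengths, mel_padded, gate_padded, \
    output_lengths, speaker_ids, f0_padded = next(iter(train_loader))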
0958c18d0493de42abe4b188e76e9cce9ba5ea42
| 5,249
|
py
|
Python
|
tools/repl_test.py
|
fluency03/deno
|
013c82e700111c79d3646fa7b150f48e142d10de
|
[
"MIT"
] | null | null | null |
tools/repl_test.py
|
fluency03/deno
|
013c82e700111c79d3646fa7b150f48e142d10de
|
[
"MIT"
] | null | null | null |
tools/repl_test.py
|
fluency03/deno
|
013c82e700111c79d3646fa7b150f48e142d10de
|
[
"MIT"
] | null | null | null |
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
from subprocess import CalledProcessError, PIPE, Popen
import sys
import time
from util import build_path, executable_suffix, green_ok
class Repl(object):
def __init__(self, deno_exe):
self.deno_exe = deno_exe
self._warm_up()
def _warm_up(self):
# This may output an error message about the history file (ignore it).
self.input("")
def input(self, *lines, **kwargs):
exit_ = kwargs.pop("exit", True)
sleep_ = kwargs.pop("sleep", 0)
p = Popen([self.deno_exe, "-A"], stdout=PIPE, stderr=PIPE, stdin=PIPE)
try:
            # Note: The repl takes >100ms until it's ready.
time.sleep(sleep_)
for line in lines:
p.stdin.write(line.encode("utf-8") + b'\n')
p.stdin.flush()
time.sleep(sleep_)
if exit_:
p.stdin.write(b'deno.exit(0)\n')
else:
time.sleep(1) # wait to be killed by js
out, err = p.communicate()
except CalledProcessError as e:
p.kill()
p.wait()
raise e
retcode = p.poll()
# Ignore Windows CRLF (\r\n).
return out.replace('\r\n', '\n'), err.replace('\r\n', '\n'), retcode
def run(self):
print('repl_test.py')
test_names = [name for name in dir(self) if name.startswith("test_")]
for t in test_names:
self.__getattribute__(t)()
sys.stdout.write(".")
sys.stdout.flush()
print(' {}\n'.format(green_ok()))
def test_console_log(self):
out, err, code = self.input("console.log('hello')", "'world'")
assertEqual(out, 'hello\nundefined\nworld\n')
assertEqual(err, '')
assertEqual(code, 0)
def test_exit_command(self):
out, err, code = self.input("exit", "'ignored'", exit=False)
assertEqual(out, '')
assertEqual(err, '')
assertEqual(code, 0)
def test_help_command(self):
out, err, code = self.input("help")
expectedOut = '\n'.join([
"exit Exit the REPL",
"help Print this help message",
"",
])
assertEqual(out, expectedOut)
assertEqual(err, '')
assertEqual(code, 0)
def test_function(self):
out, err, code = self.input("deno.writeFileSync")
assertEqual(out, '[Function: writeFileSync]\n')
assertEqual(err, '')
assertEqual(code, 0)
def test_multiline(self):
out, err, code = self.input("(\n1 + 2\n)")
assertEqual(out, '3\n')
assertEqual(err, '')
assertEqual(code, 0)
    # This should print an error instead of waiting for input
def test_eval_unterminated(self):
out, err, code = self.input("eval('{')")
assertEqual(out, '')
assert "Unexpected end of input" in err
assertEqual(code, 0)
def test_reference_error(self):
out, err, code = self.input("not_a_variable")
assertEqual(out, '')
assert "not_a_variable is not defined" in err
assertEqual(code, 0)
def test_set_timeout(self):
out, err, code = self.input(
"setTimeout(() => { console.log('b'); deno.exit(0); }, 10)",
"'a'",
exit=False)
assertEqual(out, '1\na\nb\n')
assertEqual(err, '')
assertEqual(code, 0)
def test_set_timeout_interlaced(self):
out, err, code = self.input(
"setTimeout(() => console.log('a'), 1000)",
"setTimeout(() => console.log('b'), 600)",
sleep=0.8)
assertEqual(out, '1\n2\na\nb\n')
assertEqual(err, '')
assertEqual(code, 0)
def test_async_op(self):
out, err, code = self.input(
"fetch('http://localhost:4545/tests/001_hello.js')" +
".then(res => res.text()).then(console.log)",
sleep=1)
assertEqual(out, 'Promise {}\nconsole.log("Hello World");\n\n')
assertEqual(err, '')
assertEqual(code, 0)
def test_syntax_error(self):
out, err, code = self.input("syntax error")
assertEqual(out, '')
assert "Unexpected identifier" in err
assertEqual(code, 0)
def test_type_error(self):
out, err, code = self.input("console()")
assertEqual(out, '')
assert "console is not a function" in err
assertEqual(code, 0)
def test_variable(self):
out, err, code = self.input("var a = 123;", "a")
assertEqual(out, 'undefined\n123\n')
assertEqual(err, '')
assertEqual(code, 0)
def test_lexical_scoped_variable(self):
out, err, code = self.input("let a = 123;", "a")
assertEqual(out, 'undefined\n123\n')
assertEqual(err, '')
assertEqual(code, 0)
def assertEqual(left, right):
if left != right:
raise AssertionError("{} != {}".format(repr(left), repr(right)))
def repl_tests(deno_exe):
Repl(deno_exe).run()
def main():
deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
repl_tests(deno_exe)
if __name__ == "__main__":
main()
| 31.244048
| 78
| 0.560107
|
129c611f2361887a2e27b25d4a9f1b0b4779d0bc
| 8,302
|
py
|
Python
|
django_auth_lti/middleware.py
|
jfmcoronel/django-auth-lti
|
1dc4a9c11d4c389769149fc43d2d99387b70162f
|
[
"Apache-2.0"
] | 23
|
2015-04-23T02:09:40.000Z
|
2022-01-21T20:53:20.000Z
|
django_auth_lti/middleware.py
|
jfmcoronel/django-auth-lti
|
1dc4a9c11d4c389769149fc43d2d99387b70162f
|
[
"Apache-2.0"
] | 22
|
2015-04-10T21:21:02.000Z
|
2019-10-28T16:05:26.000Z
|
django_auth_lti/middleware.py
|
jfmcoronel/django-auth-lti
|
1dc4a9c11d4c389769149fc43d2d99387b70162f
|
[
"Apache-2.0"
] | 18
|
2015-02-05T20:10:16.000Z
|
2021-09-22T12:40:41.000Z
|
import logging
import json
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from .timer import Timer
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
MiddlewareMixin = object
logger = logging.getLogger(__name__)
class LTIAuthMiddleware(MiddlewareMixin):
"""
Middleware for authenticating users via an LTI launch URL.
If the request is an LTI launch request, then this middleware attempts to
authenticate the username and signature passed in the POST data.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
If the request is not an LTI launch request, do nothing.
"""
def process_request(self, request):
logger.debug('inside process_request %s' % request.path)
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
            logger.debug('improperly configured: request has no user attr')
raise ImproperlyConfigured(
"The Django LTI auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the PINAuthMiddleware class.")
if request.method == 'POST' and request.POST.get('lti_message_type') == 'basic-lti-launch-request':
logger.debug('received a basic-lti-launch-request - authenticating the user')
# authenticate and log the user in
with Timer() as t:
user = auth.authenticate(request=request)
logger.debug('authenticate() took %s s' % t.secs)
if user is not None:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
logger.debug('user was successfully authenticated; now log them in')
request.user = user
with Timer() as t:
auth.login(request, user)
logger.debug('login() took %s s' % t.secs)
lti_launch = {
'context_id': request.POST.get('context_id', None),
'context_label': request.POST.get('context_label', None),
'context_title': request.POST.get('context_title', None),
'context_type': request.POST.get('context_type', None),
'custom_canvas_account_id': request.POST.get('custom_canvas_account_id', None),
'custom_canvas_account_sis_id': request.POST.get('custom_canvas_account_sis_id', None),
'custom_canvas_api_domain': request.POST.get('custom_canvas_api_domain', None),
'custom_canvas_course_id': request.POST.get('custom_canvas_course_id', None),
'custom_canvas_course_sectionsissourceids': request.POST.get('custom_canvas_course_sectionsissourceids', '').split(','),
'custom_canvas_enrollment_state': request.POST.get('custom_canvas_enrollment_state', None),
'custom_canvas_membership_roles': request.POST.get('custom_canvas_membership_roles', '').split(','),
'custom_canvas_person_email_sis': request.POST.get('custom_canvas_person_email_sis'),
'custom_canvas_term_name': request.POST.get('custom_canvas_term_name'),
'custom_canvas_user_id': request.POST.get('custom_canvas_user_id', None),
'custom_canvas_user_login_id': request.POST.get('custom_canvas_user_login_id', None),
'launch_presentation_css_url': request.POST.get('launch_presentation_css_url', None),
'launch_presentation_document_target': request.POST.get('launch_presentation_document_target', None),
'launch_presentation_height': request.POST.get('launch_presentation_height', None),
'launch_presentation_locale': request.POST.get('launch_presentation_locale', None),
'launch_presentation_return_url': request.POST.get('launch_presentation_return_url', None),
'launch_presentation_width': request.POST.get('launch_presentation_width', None),
'lis_course_offering_sourcedid': request.POST.get('lis_course_offering_sourcedid', None),
'lis_outcome_service_url': request.POST.get('lis_outcome_service_url', None),
'lis_person_contact_email_primary': request.POST.get('lis_person_contact_email_primary', None),
'lis_person_name_family': request.POST.get('lis_person_name_family', None),
'lis_person_name_full': request.POST.get('lis_person_name_full', None),
'lis_person_name_given': request.POST.get('lis_person_name_given', None),
'lis_person_sourcedid': request.POST.get('lis_person_sourcedid', None),
'lti_message_type': request.POST.get('lti_message_type', None),
'resource_link_description': request.POST.get('resource_link_description', None),
'resource_link_id': request.POST.get('resource_link_id', None),
'resource_link_title': request.POST.get('resource_link_title', None),
'roles': request.POST.get('roles', '').split(','),
'selection_directive': request.POST.get('selection_directive', None),
'tool_consumer_info_product_family_code': request.POST.get('tool_consumer_info_product_family_code', None),
'tool_consumer_info_version': request.POST.get('tool_consumer_info_version', None),
'tool_consumer_instance_contact_email': request.POST.get('tool_consumer_instance_contact_email', None),
'tool_consumer_instance_description': request.POST.get('tool_consumer_instance_description', None),
'tool_consumer_instance_guid': request.POST.get('tool_consumer_instance_guid', None),
'tool_consumer_instance_name': request.POST.get('tool_consumer_instance_name', None),
'tool_consumer_instance_url': request.POST.get('tool_consumer_instance_url', None),
'user_id': request.POST.get('user_id', None),
'user_image': request.POST.get('user_image', None),
}
# If a custom role key is defined in project, merge into existing role list
if hasattr(settings, 'LTI_CUSTOM_ROLE_KEY'):
custom_roles = request.POST.get(settings.LTI_CUSTOM_ROLE_KEY, '').split(',')
lti_launch['roles'] += [_f for _f in custom_roles if _f] # Filter out any empty roles
request.session['LTI_LAUNCH'] = lti_launch
else:
# User could not be authenticated!
logger.warning('user could not be authenticated via LTI params; let the request continue in case another auth plugin is configured')
# Other functions in django-auth-lti expect there to be an LTI attribute on the request object
# This enables backwards compatibility with consumers of this package who still want to use this
# single launch version of LTIAuthMiddleware
setattr(request, 'LTI', request.session.get('LTI_LAUNCH', {}))
if not request.LTI:
logger.warning("Could not find LTI launch parameters")
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
try:
logger.debug('calling the backend %s clean_username with %s' % (backend, username))
username = backend.clean_username(username)
logger.debug('cleaned username is %s' % username)
except AttributeError: # Backend has no clean_username method.
pass
return username
| 59.3
| 148
| 0.648398
|
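A hedged settings sketch matching the requirement stated in the docstring above: AuthenticationMiddleware must run before LTIAuthMiddleware. The middleware path follows this module's location; the backend class name is an assumption about the rest of the package.
MIDDLEWARE = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django_auth_lti.middleware.LTIAuthMiddleware",
]

# An LTI-aware authentication backend is also required (class name assumed here).
AUTHENTICATION_BACKENDS = [
    "django_auth_lti.backends.LTIAuthBackend",
]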
234e7a5ff826d721ccd6f0e8e5236b662cdeb4eb
| 4,000
|
py
|
Python
|
tests/test_webhooks.py
|
marty-2015/django-hurricane
|
fe05ed1360ad504167aa403c999357eb4f0cdb8b
|
[
"MIT"
] | 30
|
2020-12-23T21:07:42.000Z
|
2022-03-24T17:09:43.000Z
|
tests/test_webhooks.py
|
marty-2015/django-hurricane
|
fe05ed1360ad504167aa403c999357eb4f0cdb8b
|
[
"MIT"
] | 60
|
2021-02-05T13:20:32.000Z
|
2022-03-24T20:56:48.000Z
|
tests/test_webhooks.py
|
marty-2015/django-hurricane
|
fe05ed1360ad504167aa403c999357eb4f0cdb8b
|
[
"MIT"
] | 3
|
2021-02-11T10:46:09.000Z
|
2021-11-04T16:48:15.000Z
|
import requests
from hurricane.testing.drivers import HurricaneServerDriver
from hurricane.testing.testcases import HurricaneWebhookServerTest
class HurricaneWebhookStartServerTests(HurricaneWebhookServerTest):
starting_message = "Started webhook receiver server"
@HurricaneWebhookServerTest.cycle_server
def test_webhook_on_success(self):
hurricane_server = HurricaneServerDriver()
hurricane_server.start_server(
params=["--command", "makemigrations", "--webhook-url", "http://localhost:8074/webhook"]
)
out, err = self.driver.get_output(read_all=True)
hurricane_server.stop_server()
self.assertIn(self.starting_message, out)
self.assertIn("succeeded", out)
@HurricaneWebhookServerTest.cycle_server
def test_webhook_on_failure(self):
hurricane_server = HurricaneServerDriver()
hurricane_server.start_server(
params=["--command", "failingcommand", "--webhook-url", "http://localhost:8074/webhook"]
)
out, err = self.driver.get_output(read_all=True)
hurricane_server.stop_server()
self.assertIn(self.starting_message, out)
self.assertIn("failed", out)
@HurricaneWebhookServerTest.cycle_server
def test_webhook_without_management_commands(self):
hurricane_server = HurricaneServerDriver()
hurricane_server.start_server(params=["--webhook-url", "http://localhost:8074/webhook"])
out, err = self.driver.get_output(read_all=True)
hurricane_server.stop_server()
self.assertIn(self.starting_message, out)
self.assertIn("succeeded", out)
@HurricaneWebhookServerTest.cycle_server
def test_webhook_wrong_url(self):
response = requests.post(
"http://localhost:8074/web", timeout=5, data={"status": "succeeded", "type": "startup"}
)
self.assertEqual(response.status_code, 404)
@HurricaneWebhookServerTest.cycle_server
def test_liveness_webhook(self):
hurricane_server = HurricaneServerDriver()
hurricane_server.start_server(params=["--webhook-url", "http://localhost:8074/webhook"])
response = requests.get("http://localhost:8001/alive", timeout=5)
out, err = self.driver.get_output(read_all=True)
hurricane_server.stop_server()
self.assertEqual(response.status_code, 200)
self.assertIn(self.starting_message, out)
self.assertIn("succeeded", out)
@HurricaneWebhookServerTest.cycle_server
def test_readiness_webhook(self):
hurricane_server = HurricaneServerDriver()
hurricane_server.start_server(params=["--webhook-url", "http://localhost:8074/webhook"])
response = requests.get("http://localhost:8001/ready", timeout=5)
out, err = self.driver.get_output(read_all=True)
hurricane_server.stop_server()
self.assertEqual(response.status_code, 200)
self.assertIn(self.starting_message, out)
self.assertIn("succeeded", out)
@HurricaneWebhookServerTest.cycle_server
def test_readiness_webhook_request_queue_length(self):
hurricane_server = HurricaneServerDriver()
hurricane_server.start_server(params=["--webhook-url", "http://localhost:8074/webhook", "--req-queue-len", "0"])
response = requests.get("http://localhost:8001/ready", timeout=5)
out, err = self.driver.get_output(read_all=True)
hurricane_server.stop_server()
self.assertEqual(response.status_code, 400)
self.assertIn(self.starting_message, out)
self.assertIn("failed", out)
@HurricaneWebhookServerTest.cycle_server
def test_get_webhook_from_registry(self):
from hurricane.webhooks.base import Webhook
from hurricane.webhooks.webhook_types import StartupWebhook
Webhook(code="new_webhook")
StartupWebhook.get_from_registry()
out, err = self.driver.get_output(read_all=True)
self.assertIn(self.starting_message, out)
| 44.444444
| 120
| 0.70825
|
d0756bfa143865dba44009cabd5359203aa39ca0
| 443
|
py
|
Python
|
examples/python/basic/image_filter.py
|
collector-m/cupoch
|
1b2bb3f806695b93d6d0dd87855cf2a4da8d1ce1
|
[
"MIT"
] | 522
|
2020-01-19T05:59:00.000Z
|
2022-03-25T04:36:52.000Z
|
examples/python/basic/image_filter.py
|
collector-m/cupoch
|
1b2bb3f806695b93d6d0dd87855cf2a4da8d1ce1
|
[
"MIT"
] | 87
|
2020-02-23T09:56:48.000Z
|
2022-03-25T13:35:15.000Z
|
examples/python/basic/image_filter.py
|
collector-m/cupoch
|
1b2bb3f806695b93d6d0dd87855cf2a4da8d1ce1
|
[
"MIT"
] | 74
|
2020-01-27T15:33:30.000Z
|
2022-03-27T11:58:22.000Z
|
import cupoch as cph
import numpy as np
img = cph.io.read_image("../../testdata/lena_color.jpg")
img.downsample()
cph.visualization.draw_geometries([img])
img = cph.io.read_image("../../testdata/lena_gray.jpg")
cph.visualization.draw_geometries([img])
g_img = img.filter(cph.geometry.ImageFilterType.Gaussian3)
cph.visualization.draw_geometries([g_img])
b_img = img.bilateral_filter(3, 0.1, 10.0)
cph.visualization.draw_geometries([b_img])
| 29.533333
| 58
| 0.769752
|
d29dc4ed09e3b4e8db05c60edb7effea5fac2829
| 111
|
py
|
Python
|
webapp/settings/local.py
|
araceli24/TimeWorking
|
75e25d3710ff58701d844c57686540ffa7a11604
|
[
"MIT"
] | null | null | null |
webapp/settings/local.py
|
araceli24/TimeWorking
|
75e25d3710ff58701d844c57686540ffa7a11604
|
[
"MIT"
] | 17
|
2018-04-30T08:01:36.000Z
|
2021-06-10T20:25:13.000Z
|
webapp/settings/local.py
|
araceli24/TimeWorking
|
75e25d3710ff58701d844c57686540ffa7a11604
|
[
"MIT"
] | 2
|
2018-05-02T10:29:03.000Z
|
2018-05-02T13:02:30.000Z
|
from .base import *
ALLOWED_HOSTS = [
'127.0.0.1',
'localhost',
]
INTERNAL_IPS = (
'127.0.0.1',
)
| 11.1
| 19
| 0.540541
|
d6e3c3369154a2e139a4a4eee9ae11a71ad435f6
| 1,714
|
py
|
Python
|
cogs/askthebot.py
|
Samrid-Pandit/Samrids-Bot
|
f0f4dd740111bc328493668899ae047dd68c4776
|
[
"Apache-2.0"
] | 3
|
2020-10-24T08:58:04.000Z
|
2020-12-02T14:59:31.000Z
|
cogs/askthebot.py
|
Samrid-Pandit/Ihihi-Bot
|
f0f4dd740111bc328493668899ae047dd68c4776
|
[
"Apache-2.0"
] | 4
|
2020-10-24T03:58:21.000Z
|
2020-10-28T11:39:32.000Z
|
cogs/askthebot.py
|
Samrid-Pandit/Samrids-Bot
|
f0f4dd740111bc328493668899ae047dd68c4776
|
[
"Apache-2.0"
] | 1
|
2020-10-24T08:58:03.000Z
|
2020-10-24T08:58:03.000Z
|
from discord.ext import commands
import discord
import random
class askthebot(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases = ['gay','gayr8', 'gae'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def gayrate(self, ctx, user: discord.Member = None):
if user == None:
user = ctx.author
gayr8 = random.randrange(1,100)
rate = f"{user.mention}, You are {str(gayr8)}% Gay!"
embed = discord.Embed(title = "Gay Rate", description = rate, colour = discord.Colour.green())
await ctx.send(embed=embed)
@commands.command()
async def anyone(self, ctx,*, input = None):
guild_members = ctx.guild.members
members = [member for member in guild_members if not member.bot]
member = random.choice(members)
if input == None:
await ctx.send(f"{member.mention} is the chosen one!")
else:
await ctx.send(f"{member.mention} is chosen to {input}!")
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def waifu(self, ctx, user: discord.Member = None):
if not user:
user = ctx.author
        waifu = random.randint(1, 10)  # inclusive upper bound so the 10/10 branch below is reachable
if waifu == 1 or waifu == 2:
emoji = ":face_vomiting:"
elif waifu == 3 or waifu == 4:
emoji = ":nauseated_face:"
elif waifu == 5 or waifu == 6:
emoji = ":cold_sweat:"
elif waifu == 7:
emoji = ":kissing:"
elif waifu == 8:
emoji = ":smirk:"
elif waifu == 9:
emoji = ":relaxed:"
elif waifu == 10:
emoji = ":heart_eyes:"
rate = f"{user.mention}, You are {str(waifu)}/10 waifu! {emoji}"
embed = discord.Embed(title = "Waifu Rate", description = rate, colour = discord.Colour.red())
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(askthebot(bot))
| 28.566667
| 96
| 0.663361
|
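A minimal sketch of loading the cog above from a bot entry point (discord.py 1.x style, matching the synchronous setup()/add_cog calls in the module); the command prefix and token are placeholders.
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.askthebot")   # invokes setup(bot) defined at the bottom of the cog
bot.run("YOUR_TOKEN_HERE")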
649549cccf3bed113c126ff540f2cd319bf9a7d4
| 18,621
|
py
|
Python
|
pyaf/TS/SignalDecomposition_Cycle.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
pyaf/TS/SignalDecomposition_Cycle.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
pyaf/TS/SignalDecomposition_Cycle.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2016 Antoine Carme <Antoine.Carme@Laposte.net>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from . import Time as tsti
from . import Perf as tsperf
from . import Plots as tsplot
from . import Utils as tsutil
# for timing
import time
class cAbstractCycle:
def __init__(self , trend):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mTrend = trend;
self.mTrend_residue_name = self.mTrend.mOutName + '_residue'
self.mFormula = None;
self.mComplexity = None;
def getCycleResidueName(self):
return self.getCycleName() + "_residue";
def plot(self):
tsplot.decomp_plot(self.mCycleFrame, self.mTimeInfo.mNormalizedTimeColumn,
self.mTrend_residue_name, self.getCycleName() , self.getCycleResidueName());
def check_not_nan(self, sig , name):
#print("check_not_nan");
if(np.isnan(sig).any() or np.isinf(sig).any() ):
logger = tsutil.get_pyaf_logger();
logger.error("CYCLE_RESIDUE_WITH_NAN_IN_SIGNAL" + str(sig));
raise tsutil.Internal_PyAF_Error("CYCLE_COLUMN _FOR_TREND_RESIDUE ['" + name + "'");
pass
def computePerf(self):
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleResidueName()], self.getCycleResidueName())
# self.mCycleFrame.to_csv(self.getCycleResidueName() + ".csv");
self.mCycleFitPerf = tsperf.cPerf();
self.mCycleForecastPerf = tsperf.cPerf();
# self.mCycleFrame[[self.mTrend_residue_name, self.getCycleName()]].to_csv(self.getCycleName() + ".csv");
(lFrameFit, lFrameForecast, lFrameTest) = self.mTimeInfo.cutFrame(self.mCycleFrame);
self.mCycleFitPerf.compute(
lFrameFit[self.mTrend_residue_name], lFrameFit[self.getCycleName()], self.getCycleName())
self.mCycleForecastPerf.compute(
lFrameForecast[self.mTrend_residue_name], lFrameForecast[self.getCycleName()], self.getCycleName())
class cZeroCycle(cAbstractCycle):
def __init__(self , trend):
super().__init__(trend);
self.mFormula = "NoCycle"
self.mComplexity = 0;
def getCycleName(self):
return self.mTrend_residue_name + "_zeroCycle";
def fit(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[self.getCycleName()] = np.zeros_like(self.mTrendFrame[self.mTrend_residue_name])
self.mCycleFrame[self.getCycleResidueName()] = self.mCycleFrame[self.mTrend_residue_name];
self.mOutName = self.getCycleName()
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
df[self.getCycleName()] = np.zeros_like(df[self.mTrend_residue_name]);
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
return df;
class cSeasonalPeriodic(cAbstractCycle):
def __init__(self , trend, date_part):
super().__init__(trend);
self.mDatePart = date_part;
self.mEncodedValueDict = {}
self.mFormula = "Seasonal_" + self.mDatePart;
self.mComplexity = 1;
def getCycleName(self):
return self.mTrend_residue_name + "_Seasonal_" + self.mDatePart;
def hasEnoughData(self, iTimeMin, iTimeMax):
lTimeDelta = iTimeMax - iTimeMin;
lDays = lTimeDelta / np.timedelta64(1,'D');
lSeconds = lTimeDelta / np.timedelta64(1,'s');
if(self.mDatePart == "Hour"):
return (lDays >= 10);
if(self.mDatePart == "Minute"):
lHours = lSeconds // 3600;
return (lHours >= 10);
if(self.mDatePart == "Second"):
lMinutes = lSeconds // 60;
return (lMinutes >= 10);
if(self.mDatePart == "DayOfMonth"):
lMonths = lDays // 30;
return (lMonths >= 10);
if(self.mDatePart == "DayOfWeek"):
lWeeks = lDays // 7;
return (lWeeks >= 10);
if(self.mDatePart == "MonthOfYear"):
lYears = lDays // 360;
return (lYears >= 10);
if(self.mDatePart == "WeekOfYear"):
lYears = lDays // 360;
return (lYears >= 10);
return False;
def fit(self):
assert(self.mTimeInfo.isPhysicalTime());
lHor = self.mTimeInfo.mHorizon;
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
lName = self.getCycleName();
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[lName] = self.mTrendFrame[self.mTime].apply(self.get_date_part);
# we encode only using estimation
lCycleFrameEstim = self.mTimeInfo.getEstimPart(self.mCycleFrame);
lTrendMeanEstim = lCycleFrameEstim[self.mTrend_residue_name].mean();
lGroupBy = lCycleFrameEstim.groupby(by=[lName] , sort=False)[self.mTrend_residue_name].mean();
self.mEncodedValueDict = lGroupBy.to_dict()
self.mDefaultValue = lTrendMeanEstim;
# print("cSeasonalPeriodic_DefaultValue" , self.getCycleName(), self.mDefaultValue);
self.mCycleFrame[lName + '_enc'] = self.mCycleFrame[lName].apply(lambda x : self.mEncodedValueDict.get(x , self.mDefaultValue))
self.mCycleFrame[lName + '_enc'].fillna(lTrendMeanEstim, inplace=True);
self.mCycleFrame[self.getCycleResidueName()] = self.mCycleFrame[self.mTrend_residue_name] - self.mCycleFrame[lName + '_enc'];
self.mCycleFrame[lName + '_NotEncoded'] = self.mCycleFrame[lName];
self.mCycleFrame[lName] = self.mCycleFrame[lName + '_enc'];
self.mOutName = self.getCycleName()
#print("encoding '" + lName + "' " + str(self.mEncodedValueDict));
@tsutil.cMemoize
def get_date_part(self, x):
lDatepartComputer = self.mTimeInfo.get_date_part_value_computer(self.mDatePart)
return lDatepartComputer(x)
@tsutil.cMemoize
def get_date_part_encoding(self, x):
lDatepartComputer = self.mTimeInfo.get_date_part_value_computer(self.mDatePart)
dp = lDatepartComputer(x)
return self.mEncodedValueDict.get(dp , self.mDefaultValue)
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
df[self.getCycleName()] = df[self.mTime].apply(self.get_date_part_encoding);
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
return df;
class cBestCycleForTrend(cAbstractCycle):
def __init__(self , trend, criterion):
super().__init__(trend);
self.mCycleFrame = pd.DataFrame()
self.mCyclePerfDict = {}
self.mBestCycleValueDict = {}
self.mBestCycleLength = None
self.mCriterion = criterion
self.mComplexity = 2;
self.mFormula = "BestCycle"
def getCycleName(self):
return self.mTrend_residue_name + "_bestCycle_by" + self.mCriterion;
def dumpCyclePerfs(self):
print(self.mCyclePerfDict);
def computeBestCycle(self):
# self.dumpCyclePerfs();
lCycleFrameEstim = self.mTimeInfo.getEstimPart(self.mCycleFrame);
self.mDefaultValue = lCycleFrameEstim[self.mTrend_residue_name].mean();
self.mBestCycleLength = None;
lBestCycleIdx = None;
lBestCriterion = None;
if(self.mCyclePerfDict):
for k in sorted(self.mCyclePerfDict.keys()):
# smallest cycles are better
if((lBestCriterion is None) or (self.mCyclePerfDict[k] < lBestCriterion)):
lBestCycleIdx = k;
lBestCriterion = self.mCyclePerfDict[k];
if(self.mOptions.mCycle_Criterion_Threshold is None or
(self.mCyclePerfDict[lBestCycleIdx] < self.mOptions.mCycle_Criterion_Threshold)) :
self.mBestCycleLength = lBestCycleIdx
# print("BEST_CYCLE_PERF" , self.mTrend_residue_name, self.mBestCycleLength)
self.transformDataset(self.mCycleFrame);
pass
def generate_cycles(self):
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name ] = self.mTrendFrame[self.mTrend_residue_name]
lCycleFrameEstim = self.mTimeInfo.getEstimPart(self.mCycleFrame);
self.mDefaultValue = lCycleFrameEstim[self.mTrend_residue_name].mean();
del lCycleFrameEstim;
self.mCyclePerfDict = {}
lMaxRobustCycle = self.mTrendFrame.shape[0]//12;
# print("MAX_ROBUST_CYCLE_LENGTH", self.mTrendFrame.shape[0], lMaxRobustCycle);
lCycleLengths = self.mOptions.mCycleLengths or range(2,lMaxRobustCycle + 1)
lCycleFrame = pd.DataFrame();
lCycleFrame[self.mTrend_residue_name ] = self.mTrendFrame[self.mTrend_residue_name]
for i in lCycleLengths:
if ((i > 1) and (i <= lMaxRobustCycle)):
name_i = self.mTrend_residue_name + '_Cycle';
lCycleFrame[name_i] = self.mCycleFrame[self.mTimeInfo.mRowNumberColumn] % i
lCycleFrameEstim = self.mTimeInfo.getEstimPart(lCycleFrame);
lGroupBy = lCycleFrameEstim.groupby(by=[name_i] , sort=False)[self.mTrend_residue_name].mean();
lEncodedValueDict = lGroupBy.to_dict()
lCycleFrame[name_i + '_enc'] = lCycleFrame[name_i].apply(
lambda x : lEncodedValueDict.get(x , self.mDefaultValue))
self.mBestCycleValueDict[i] = lEncodedValueDict;
lPerf = tsperf.cPerf();
# validate the cycles on the validation part
lValidFrame = self.mTimeInfo.getValidPart(lCycleFrame);
lCritValue = lPerf.computeCriterion(lValidFrame[self.mTrend_residue_name],
lValidFrame[name_i + "_enc"],
self.mCriterion,
"Validation")
self.mCyclePerfDict[i] = lCritValue;
if(self.mOptions.mDebugCycles):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_INTERNAL_CRITERION " + name_i + " " + str(i) + \
" " + self.mCriterion +" " + str(lCritValue))
pass
def fit(self):
# print("cycle_fit" , self.mTrend_residue_name);
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.generate_cycles();
self.computeBestCycle();
self.mOutName = self.getCycleName()
self.mFormula = "Cycle_None"
if(self.mBestCycleLength is not None):
self.mFormula = "Cycle_Length_" + str(self.mBestCycleLength);
self.transformDataset(self.mCycleFrame);
def transformDataset(self, df):
if(self.mBestCycleLength is not None):
lValueCol = df[self.mTimeInfo.mRowNumberColumn].apply(lambda x : x % self.mBestCycleLength);
df['cycle_internal'] = lValueCol;
# print("BEST_CYCLE" , self.mBestCycleLength)
# print(self.mBestCycleValueDict);
lDict = self.mBestCycleValueDict[self.mBestCycleLength];
df[self.getCycleName()] = lValueCol.apply(lambda x : lDict.get(x , self.mDefaultValue));
else:
df[self.getCycleName()] = np.zeros_like(df[self.mTimeInfo.mRowNumberColumn]);
target = df[self.mTrend_residue_name]
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleName()].values , self.getCycleName());
return df;
class cCycleEstimator:
def __init__(self):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mCycleList = {}
def addSeasonal(self, trend, seas_type, resolution):
if(resolution >= self.mTimeInfo.mResolution):
lSeasonal = cSeasonalPeriodic(trend, seas_type);
if(self.mOptions.mActivePeriodics[lSeasonal.mFormula]):
if(lSeasonal.hasEnoughData(self.mTimeInfo.mTimeMin, self.mTimeInfo.mTimeMax)):
self.mCycleList[trend] = self.mCycleList[trend] + [lSeasonal];
pass
def defineCycles(self):
for trend in self.mTrendList:
self.mCycleList[trend] = [];
if(self.mOptions.mActivePeriodics['NoCycle']):
self.mCycleList[trend] = [cZeroCycle(trend)];
if(self.mOptions.mActivePeriodics['BestCycle']):
self.mCycleList[trend] = self.mCycleList[trend] + [
cBestCycleForTrend(trend, self.mOptions.mCycle_Criterion)];
if(self.mTimeInfo.isPhysicalTime()):
# The order used here is mandatory. see filterSeasonals before changing this order.
self.addSeasonal(trend, "MonthOfYear", tsti.cTimeInfo.sRES_MONTH);
self.addSeasonal(trend, "WeekOfYear", tsti.cTimeInfo.sRES_DAY);
self.addSeasonal(trend, "DayOfMonth", tsti.cTimeInfo.sRES_DAY);
self.addSeasonal(trend, "DayOfWeek", tsti.cTimeInfo.sRES_DAY);
self.addSeasonal(trend, "Hour", tsti.cTimeInfo.sRES_HOUR);
self.addSeasonal(trend, "Minute", tsti.cTimeInfo.sRES_MINUTE);
self.addSeasonal(trend, "Second", tsti.cTimeInfo.sRES_SECOND);
for trend in self.mTrendList:
if(len(self.mCycleList[trend]) == 0):
self.mCycleList[trend] = [cZeroCycle(trend)];
for cycle in self.mCycleList[trend]:
cycle.mTrendFrame = self.mTrendFrame;
cycle.mTimeInfo = self.mTimeInfo;
cycle.mOptions = self.mOptions;
def plotCycles(self):
for trend in self.mTrendList:
for cycle in self.mCycleList[trend]:
cycle.plot()
def dumpCyclePerf(self, cycle):
if(self.mOptions.mDebugCycles):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_PERF_DETAIL_COUNT_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mCount) + " %.3f" % (cycle.mCycleForecastPerf.mCount));
logger.debug("CYCLE_PERF_DETAIL_MAPE_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mMAPE)+ " %.3f" % (cycle.mCycleForecastPerf.mMAPE));
logger.debug("CYCLE_PERF_DETAIL_L2_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mL2) + " %.3f" % (cycle.mCycleForecastPerf.mL2));
logger.debug("CYCLE_PERF_DETAIL_R2_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mR2) + " %.3f" % (cycle.mCycleForecastPerf.mR2));
logger.debug("CYCLE_PERF_DETAIL_PEARSONR_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mPearsonR) + " %.3f" % (cycle.mCycleForecastPerf.mPearsonR));
def estimateCycles(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
for trend in self.mTrendList:
lTrend_residue_name = trend.mOutName + '_residue'
self.mCycleFrame[lTrend_residue_name] = self.mTrendFrame[lTrend_residue_name]
for cycle in self.mCycleList[trend]:
start_time = time.time()
cycle.fit();
if(self.mOptions.mDebugPerformance):
cycle.computePerf();
self.dumpCyclePerf(cycle)
self.mCycleFrame[cycle.getCycleName()] = cycle.mCycleFrame[cycle.getCycleName()]
self.mCycleFrame[cycle.getCycleResidueName()] = cycle.mCycleFrame[cycle.getCycleResidueName()]
if(self.mOptions.mDebug):
cycle.check_not_nan(self.mCycleFrame[cycle.getCycleResidueName()].values ,
cycle.getCycleResidueName())
end_time = time.time()
lTrainingTime = round(end_time - start_time , 2);
if(self.mOptions.mDebugProfile):
logger = tsutil.get_pyaf_logger();
logger.info("CYCLE_TRAINING_TIME_IN_SECONDS '" + cycle.mOutName + "' " + str(lTrainingTime))
pass
def filterSeasonals(self):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS_START")
for trend in self.mTrendList:
lPerfs = {}
lTrend_residue_name = trend.mOutName + '_residue'
lCycleList = []
lSeasonals = []
for cycle in self.mCycleList[trend]:
if(isinstance(cycle , cSeasonalPeriodic)):
cycle.computePerf();
lPerfs[cycle.mOutName] = cycle.mCycleForecastPerf.getCriterionValue(self.mOptions.mCycle_Criterion)
lSeasonals = lSeasonals + [cycle]
else:
lCycleList = lCycleList + [cycle]
if(len(lSeasonals) == 0):
return
lBestCriterion = None
lBestSeasonal = None
for (k,cycle) in enumerate(lSeasonals):
lCriterionValue = lPerfs[cycle.mOutName]
if((lBestCriterion is None) or (lCriterionValue < (1.05 * lBestCriterion))):
lBestSeasonal = cycle
lBestCriterion = lCriterionValue;
lCycleList = lCycleList + [lBestSeasonal]
self.mCycleList[trend] = lCycleList
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS " + trend.mOutName + " " + lBestSeasonal.mOutName)
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS_END")
pass
def estimateAllCycles(self):
self.defineCycles();
self.estimateCycles()
if(self.mOptions.mFilterSeasonals):
self.filterSeasonals()
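# --- Illustrative sketch (not part of pyaf) ---------------------------------
# The seasonal filtering above walks the candidates in their fixed order and
# replaces the current best whenever a later candidate's criterion is not more
# than ~5% worse than the best seen so far. A minimal standalone version of
# that rule, using hypothetical (name, criterion) pairs, could look like this:
def pick_seasonal(candidates, tolerance=1.05):
    best_name, best_criterion = None, None
    for name, criterion in candidates:
        if best_criterion is None or criterion < tolerance * best_criterion:
            best_name, best_criterion = name, criterion
    return best_name
# Example: 'DayOfWeek' is kept because 0.31 is within 5% of the earlier 0.30.
# pick_seasonal([('MonthOfYear', 0.30), ('DayOfWeek', 0.31), ('Hour', 0.40)])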
| 45.306569
| 135
| 0.619784
|
192a79387e5fc6493141704c0b6aae70d4b6a9f6
| 822
|
py
|
Python
|
setup.py
|
Nburkhal/lambdata
|
47f079fa3f49506730def1db1a91bbad931fb260
|
[
"MIT"
] | null | null | null |
setup.py
|
Nburkhal/lambdata
|
47f079fa3f49506730def1db1a91bbad931fb260
|
[
"MIT"
] | 6
|
2020-03-24T17:39:48.000Z
|
2021-09-08T01:24:07.000Z
|
setup.py
|
Nburkhal/lambdata
|
47f079fa3f49506730def1db1a91bbad931fb260
|
[
"MIT"
] | null | null | null |
"""
A collection of Data Science helper functions
@author: Nick Burkhalter
@url: https://github.com/Nburkhal/lambdata
"""
# Always prefer setuptools over distutils
import setuptools
REQUIRED = [
'numpy',
'pandas',
'scikit-learn',
'category-encoders',
'matplotlib',
'seaborn',
'pdpbox',
'shap'
]
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='lambdata_nburkhal',
    version='0.0.6',
description='A collection of Data Science helper functions',
long_description= long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=REQUIRED,
    author='Nick Burkhalter',
    author_email='nburkhal.nb@gmail.com',
license='MIT'
)
| 22.833333
| 64
| 0.681265
|
407dee9b3a066a9863a8215dc3f134bf0056bcc4
| 161
|
py
|
Python
|
yapftests/huawei/huaweistyle/resources/paragraph_2/2_2_a_function_comment_correct_.py
|
akuleshov7/huawei-yapf
|
72c188257e2bafa4da9e553ba0ee9b86831c51b8
|
[
"Apache-2.0"
] | 12
|
2020-04-24T18:12:08.000Z
|
2021-06-09T16:53:10.000Z
|
yapftests/huawei/huaweistyle/resources/paragraph_2/2_2_a_function_comment_correct_.py
|
akuleshov7/h-yapf
|
72c188257e2bafa4da9e553ba0ee9b86831c51b8
|
[
"Apache-2.0"
] | 1
|
2020-09-09T19:55:35.000Z
|
2020-09-14T00:07:29.000Z
|
yapftests/huawei/huaweistyle/resources/paragraph_2/2_2_a_function_comment_correct_.py
|
akuleshov7/h-yapf
|
72c188257e2bafa4da9e553ba0ee9b86831c51b8
|
[
"Apache-2.0"
] | null | null | null |
def some_function():
"""
Function description: some description
Parameters:
Return Value:
Exception Description:
Change History:
"""
| 17.888889
| 42
| 0.645963
|
9fc10540c482aadac9f861e947075535b58c712d
| 2,857
|
py
|
Python
|
intro-to-pytorch/helper.py
|
iArunava/Intro-to-Deep-Learning-with-Pytorch-Udacity-Solutions
|
2149499fa1ed4757362d9b743503718b4a26c3bc
|
[
"MIT"
] | 16
|
2018-11-22T14:35:45.000Z
|
2022-02-05T10:40:28.000Z
|
intro-to-pytorch/helper.py
|
iArunava/Intro-to-Deep-Learning-with-Pytorch-Udacity-Solutions
|
2149499fa1ed4757362d9b743503718b4a26c3bc
|
[
"MIT"
] | null | null | null |
intro-to-pytorch/helper.py
|
iArunava/Intro-to-Deep-Learning-with-Pytorch-Udacity-Solutions
|
2149499fa1ed4757362d9b743503718b4a26c3bc
|
[
"MIT"
] | 11
|
2019-02-03T09:16:08.000Z
|
2020-07-22T21:19:49.000Z
|
# This helper module is taken from udacity/deep-learning-v2-pytorch/intro-to-pytorch/helper.py
# The code here may contain modifications
import matplotlib.pyplot as plt
import numpy as np
from torch import nn, optim
from torch.autograd import Variable
def test_network(net, trainloader):
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
dataiter = iter(trainloader)
    images, labels = next(dataiter)
# Create Variables for the inputs and targets
inputs = Variable(images)
targets = Variable(images)
# Clear the gradients from all Variables
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = net.forward(inputs)
loss = criterion(output, targets)
loss.backward()
optimizer.step()
return True
def imshow(image, ax=None, title=None, normalize=True):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
image = image.numpy().transpose((1, 2, 0))
if normalize:
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
image = np.clip(image, 0, 1)
ax.imshow(image)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='both', length=0)
ax.set_xticklabels('')
ax.set_yticklabels('')
return ax
def view_recon(img, recon):
''' Function for displaying an image (as a PyTorch Tensor) and its
reconstruction also a PyTorch Tensor
'''
fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)
axes[0].imshow(img.numpy().squeeze())
axes[1].imshow(recon.data.numpy().squeeze())
for ax in axes:
ax.axis('off')
        ax.set_adjustable('box')
def view_classify(img, ps, version="MNIST"):
    ''' Function for viewing an image and its predicted classes.
'''
ps = ps.data.numpy().squeeze()
fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())
ax1.axis('off')
ax2.barh(np.arange(10), ps)
ax2.set_aspect(0.1)
ax2.set_yticks(np.arange(10))
if version == "MNIST":
ax2.set_yticklabels(np.arange(10))
elif version == "Fashion":
ax2.set_yticklabels(['T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle Boot'], size='small');
ax2.set_title('Class Probability')
ax2.set_xlim(0, 1.1)
plt.tight_layout()
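# Hedged usage sketch (not part of the original Udacity helper): exercises the
# plotting helpers on random tensors. Guarded so importing the module stays
# side-effect free; the tensor shapes are assumptions that match the functions
# above, not data from any real dataset.
if __name__ == '__main__':
    import torch
    rgb_image = torch.rand(3, 64, 64)             # (C, H, W), as imshow expects
    imshow(rgb_image, normalize=False)
    digit = torch.rand(1, 28, 28)                  # single MNIST-sized image
    probabilities = torch.softmax(torch.rand(10), dim=0)
    view_classify(digit, probabilities, version="MNIST")
    plt.show()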
| 29.153061
| 94
| 0.59013
|
4728b58b7bfdcb5de67291484c04a21ee2351e39
| 250
|
py
|
Python
|
manage.py
|
anoited007/country-dashboard
|
577bbcc4992e24c484650895fabbcdf4343e1bdb
|
[
"MIT"
] | 16
|
2017-10-19T03:36:41.000Z
|
2022-03-03T11:46:20.000Z
|
manage.py
|
ChrisAchinga/wazimap
|
a66a1524030a8b98e7ea0dfb270d1946ca75b3b2
|
[
"MIT"
] | 66
|
2016-02-15T08:59:29.000Z
|
2017-09-21T14:00:43.000Z
|
manage.py
|
ChrisAchinga/wazimap
|
a66a1524030a8b98e7ea0dfb270d1946ca75b3b2
|
[
"MIT"
] | 18
|
2017-10-06T12:26:37.000Z
|
2021-08-30T01:38:37.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wazimap.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.727273
| 71
| 0.772
|
e6bab8365ae1ee803f7308a6dda547fe2548b2fd
| 5,624
|
py
|
Python
|
miscellaneous/PDBscraper.py
|
frumpowy/miscellaneous
|
07b7a0ae035a13de6bbd4f0f92318e1a5fb6864a
|
[
"MIT"
] | null | null | null |
miscellaneous/PDBscraper.py
|
frumpowy/miscellaneous
|
07b7a0ae035a13de6bbd4f0f92318e1a5fb6864a
|
[
"MIT"
] | null | null | null |
miscellaneous/PDBscraper.py
|
frumpowy/miscellaneous
|
07b7a0ae035a13de6bbd4f0f92318e1a5fb6864a
|
[
"MIT"
] | null | null | null |
"""
This script will submit a query to Protein Data Bank, download all PDB files associated
with the keywords and write a summary to a CSV file, which includes structure code, title,
resolution, reference DOI, etc.
To run it on command line:
$ python PDBscraper.py --query "..." --download ...
Example:
$ python PDBscraper.py --query "integrin alpha V beta 6" --download False
"""
import argparse
from lxml import html
import os
import pandas as pd
import requests
def pdb_search(search_string):
search_string = search_string[0]
url = "https://www.rcsb.org/pdb/rest/search/"
header = {'Content-Type': 'application/x-www-form-urlencoded'}
data = """
<orgPdbQuery><queryType>org.pdb.query.simple.AdvancedKeywordQuery</queryType>
<keywords>{}</keywords></orgPdbQuery>
""".format(search_string)
pdb_codes = requests.post(url, data=data, headers=header).text.split()
return pdb_codes, search_string.replace(" ", "_")
def filter_results(pdb_codes, included, excluded):
filtered = []
for code in pdb_codes:
pdb = PDBObject(code)
        title = pdb.title[0].lower() if pdb.title else ""
included = included.lower() if included is not None else ""
excluded = excluded.lower() if excluded else None
if excluded:
if included in title and excluded not in title:
filtered.append(pdb)
else:
if included in title:
filtered.append(pdb)
return filtered
def substructure_search(substructure_file, inchi_keys):
from rdkit import Chem
    # Use the first molecule in the SD file as the query substructure.
    sub = next(Chem.SDMolSupplier(substructure_file))
    matched = []
    for key in inchi_keys:
        # MolFromInchi expects a full InChI string; a plain InChI key yields None
        # and is skipped instead of raising an AttributeError.
        ligand = Chem.MolFromInchi(key)
        if ligand is not None and ligand.HasSubstructMatch(sub):
matched.append(key)
return matched
class PDBObject:
def __init__(self, code):
self.code = code
self.info_url = "https://www.rcsb.org/structure/{}".format(self.code)
self.download_url = "https://files.rcsb.org/download/{}.pdb".format(self.code)
response = requests.get(self.info_url)
tree = html.fromstring(response.content)
self.title = tree.xpath("//span[@id='structureTitle']/text()")
self.reference_title = tree.xpath("//div[@id='primarycitation']/h4/text()")
self.reference_DOI = tree.xpath("//li[@id='pubmedDOI']/a/text()")
self.resolution = tree.xpath("//ul[contains(@id,'exp_header')]/li[contains(@id,'resolution')]/text()")
self.ligands = tree.xpath("//tr[contains(@id,'ligand_row')]//a[contains(@href, '/ligand/')]/text()")
self.inchi_keys = tree.xpath("//tr[contains(@id, 'ligand_row')]//td[3]/text()")
self.inchi_keys = [key for key in self.inchi_keys if len(key) == 27]
def get_file(self, folder="."):
file_content = requests.get(self.download_url).text
file_name = os.path.join(folder, "{}.pdb".format(self.code))
with open(file_name, "w") as PDB_file:
PDB_file.write(file_content)
def download_structure(pdb_codes, folder="."):
if not os.path.isdir(folder):
os.mkdir(folder)
for code in pdb_codes:
pdb = PDBObject(code)
if pdb.title:
pdb.get_file(folder)
def write_csv(pdb_codes, matched, folder="."):
if not os.path.isdir(folder):
os.mkdir(folder)
file = os.path.join(folder, "summary.csv")
for pdb in pdb_codes:
df = pd.DataFrame(
{'PDB code': pdb.code,
'Title': pdb.title if pdb.title else ["unknown"],
'Resolution': [resolution.replace("Ã…", "Å").replace(" ", "") for resolution in
pdb.resolution] if pdb.resolution else ["unknown"],
'Reference Title': pdb.reference_title if pdb.reference_title else ["unknown"],
'Reference DOI': pdb.reference_DOI if pdb.reference_DOI else ["unknown"],
'Ligands': [", ".join(pdb.ligands)] if pdb.ligands else ["none"],
'InChi keys': [", ".join(pdb.inchi_keys)] if pdb.inchi_keys else ["none"],
'Matched substructures': [key for key in pdb.inchi_keys if key in matched]
})
df.to_csv(file, mode='a', header=False)
return file
def main(search_string, download, included, excluded, substructure_file):
pdb_codes, folder = pdb_search(search_string)
filtered = filter_results(pdb_codes, included, excluded)
    matched = []
    if download:
        print("Downloading structures...")
        download_structure([f.code for f in filtered], folder)
    for f in filtered:
        if substructure_file:
            matched += substructure_search(substructure_file, f.inchi_keys)
    file = write_csv(filtered, matched, folder)
print("Done! Summary available in {}.".format(file))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--query', nargs=1, required=True, help="PDB query (string)")
parser.add_argument('--download', required=False, default=False,
help="Set to 'True', if you want to download associated PDB files")
parser.add_argument('--included', required=False, help="PDB title must include...")
parser.add_argument('--excluded', required=False, help="PDB title must not include...")
parser.add_argument('--substructure', required=False, help="SD file with substructure")
args = parser.parse_args()
return args.query, args.download, args.included, args.excluded, args.substructure
if __name__ == "__main__":
query, download, included, excluded, substructure = parse_args()
main(query, download, included, excluded, substructure)
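# Hedged usage sketch (not part of the original script): PDBObject can also be
# driven directly from Python instead of through the CLI. The PDB code below is
# an illustrative assumption, constructing a PDBObject performs a network
# request, and get_file assumes the target folder already exists, so this is
# left as a comment rather than executable code:
#     pdb = PDBObject("1TIM")
#     print(pdb.title, pdb.resolution, pdb.ligands)
#     pdb.get_file(folder=".")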
| 35.594937
| 110
| 0.648293
|
17473324feccc38a12a2d7ee0d681ebb0b67fd6d
| 10,801
|
py
|
Python
|
tests/test_rst_compiler.py
|
magmax/nikola
|
4cd4584c574cb5ebff8ca90535498ef47dd05a04
|
[
"MIT"
] | 1
|
2016-09-12T18:10:07.000Z
|
2016-09-12T18:10:07.000Z
|
tests/test_rst_compiler.py
|
magmax/nikola
|
4cd4584c574cb5ebff8ca90535498ef47dd05a04
|
[
"MIT"
] | null | null | null |
tests/test_rst_compiler.py
|
magmax/nikola
|
4cd4584c574cb5ebff8ca90535498ef47dd05a04
|
[
"MIT"
] | null | null | null |
# coding: utf8
# Author: Rodrigo Bistolfi
# Date: 03/2013
""" Test cases for Nikola ReST extensions.
A base class ReSTExtensionTestCase provides the tests' basic behavior.
Subclasses must override the "sample" class attribute with the ReST markup.
The sample will be rendered as HTML using publish_parts() by setUp().
One method is provided for checking the resulting HTML:
* assertHTMLContains(element, attributes=None, text=None)
The HTML is parsed with lxml for checking against the data you provide. The
method takes an element argument, a string representing the *name* of an HTML
tag, like "script" or "iframe". We will try to find this tag in the document
and perform the tests on it. You can pass a dictionary to the attributes kwarg
representing the name and the value of the tag attributes. The text kwarg takes
a string argument, which will be tested against the contents of the HTML
element.
One last caveat: you need to url unquote your urls if you are going to test
attributes like "src" or "link", since the HTML rendered by docutils will be
always unquoted.
"""
from __future__ import unicode_literals, absolute_import
import os
import sys
import io
try:
from io import StringIO
except ImportError:
from StringIO import StringIO # NOQA
import tempfile
import docutils
from lxml import html
import pytest
import unittest
import nikola.plugins.compile.rest
from nikola.plugins.compile.rest import gist
from nikola.plugins.compile.rest import vimeo
import nikola.plugins.compile.rest.listing
from nikola.plugins.compile.rest.doc import Plugin as DocPlugin
from nikola.utils import _reload
from .base import BaseTestCase, FakeSite
class ReSTExtensionTestCase(BaseTestCase):
""" Base class for testing ReST extensions """
sample = 'foo'
deps = None
def setUp(self):
self.compiler = nikola.plugins.compile.rest.CompileRest()
self.compiler.set_site(FakeSite())
return super(ReSTExtensionTestCase, self).setUp()
def basic_test(self):
""" Parse cls.sample into a HTML document tree """
self.setHtmlFromRst(self.sample)
def setHtmlFromRst(self, rst):
""" Create html output from rst string """
tmpdir = tempfile.mkdtemp()
inf = os.path.join(tmpdir, 'inf')
outf = os.path.join(tmpdir, 'outf')
depf = os.path.join(tmpdir, 'outf.dep')
with io.open(inf, 'w+', encoding='utf8') as f:
f.write(rst)
self.html = self.compiler.compile_html(inf, outf)
with io.open(outf, 'r', encoding='utf8') as f:
self.html = f.read()
os.unlink(inf)
os.unlink(outf)
if os.path.isfile(depf):
with io.open(depf, 'r', encoding='utf8') as f:
self.assertEqual(self.deps, f.read())
os.unlink(depf)
else:
self.assertEqual(self.deps, None)
os.rmdir(tmpdir)
self.html_doc = html.parse(StringIO(self.html))
def assertHTMLContains(self, element, attributes=None, text=None):
""" Test if HTML document includes an element with the given
attributes and text content
"""
try:
tag = next(self.html_doc.iter(element))
except StopIteration:
raise Exception("<{0}> not in {1}".format(element, self.html))
else:
if attributes:
arg_attrs = set(attributes.items())
tag_attrs = set(tag.items())
self.assertTrue(arg_attrs.issubset(tag_attrs))
if text:
self.assertIn(text, tag.text)
class ReSTExtensionTestCaseTestCase(ReSTExtensionTestCase):
""" Simple test for our base class :) """
sample = '.. raw:: html\n\n <iframe src="foo" height="bar">spam</iframe>'
def test_test(self):
self.basic_test()
self.assertHTMLContains("iframe", attributes={"src": "foo"},
text="spam")
self.assertRaises(Exception, self.assertHTMLContains, "eggs", {})
class MathTestCase(ReSTExtensionTestCase):
    sample = r':math:`e^{ix} = \cos x + i\sin x`'
def test_mathjax(self):
""" Test that math is outputting MathJax."""
self.basic_test()
self.assertHTMLContains("span", attributes={"class": "math"},
text="\(e^{ix} = \cos x + i\sin x\)")
class GistTestCase(ReSTExtensionTestCase):
""" Test GitHubGist.
We will replace get_raw_gist() and get_raw_gist_with_filename()
    by monkeypatching the GitHubGist class to avoid a network dependency
"""
gist_type = gist.GitHubGist
sample = '.. gist:: fake_id\n :file: spam.py'
sample_without_filename = '.. gist:: fake_id2'
def setUp(self):
""" Patch GitHubGist for avoiding network dependency """
super(GistTestCase, self).setUp()
self.gist_type.get_raw_gist_with_filename = lambda *_: 'raw_gist_file'
self.gist_type.get_raw_gist = lambda *_: "raw_gist"
_reload(nikola.plugins.compile.rest)
@pytest.mark.skipif(True, reason="This test indefinitely skipped.")
def test_gist(self):
""" Test the gist directive with filename """
self.setHtmlFromRst(self.sample)
output = 'https://gist.github.com/fake_id.js?file=spam.py'
self.assertHTMLContains("script", attributes={"src": output})
self.assertHTMLContains("pre", text="raw_gist_file")
@pytest.mark.skipif(True, reason="This test indefinitely skipped.")
def test_gist_without_filename(self):
""" Test the gist directive without filename """
self.setHtmlFromRst(self.sample_without_filename)
output = 'https://gist.github.com/fake_id2.js'
self.assertHTMLContains("script", attributes={"src": output})
self.assertHTMLContains("pre", text="raw_gist")
class GistIntegrationTestCase(ReSTExtensionTestCase):
""" Test requests integration. The gist plugin uses requests to fetch gist
contents and place it in a noscript tag.
"""
sample = '.. gist:: 1812835'
def test_gist_integration(self):
""" Fetch contents of the gist from GH and render in a noscript tag """
self.basic_test()
text = ('Be alone, that is the secret of invention: be alone, that is'
' when ideas are born. -- Nikola Tesla')
self.assertHTMLContains('pre', text=text)
class SlidesTestCase(ReSTExtensionTestCase):
""" Slides test case """
sample = '.. slides:: IMG.jpg\n'
def test_slides(self):
""" Test the slides js generation and img tag creation """
self.basic_test()
self.assertHTMLContains("img", attributes={"src": "IMG.jpg"})
class SoundCloudTestCase(ReSTExtensionTestCase):
""" SoundCloud test case """
sample = '.. soundcloud:: SID\n :height: 400\n :width: 600'
def test_soundcloud(self):
""" Test SoundCloud iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("https://w.soundcloud.com"
"/player/?url=http://"
"api.soundcloud.com/"
"tracks/SID"),
"height": "400", "width": "600"})
class VimeoTestCase(ReSTExtensionTestCase):
"""Vimeo test.
Set Vimeo.request_size to False for avoiding querying the Vimeo api
over the network
"""
sample = '.. vimeo:: VID\n :height: 400\n :width: 600'
def setUp(self):
""" Disable query of the vimeo api over the wire """
vimeo.Vimeo.request_size = False
super(VimeoTestCase, self).setUp()
_reload(nikola.plugins.compile.rest)
def test_vimeo(self):
""" Test Vimeo iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("//player.vimeo.com/"
"video/VID"),
"height": "400", "width": "600"})
class YoutubeTestCase(ReSTExtensionTestCase):
""" Youtube test case """
sample = '.. youtube:: YID\n :height: 400\n :width: 600'
def test_youtube(self):
""" Test Youtube iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("//www.youtube.com/"
"embed/YID?rel=0&hd=1&"
"wmode=transparent"),
"height": "400", "width": "600"})
class ListingTestCase(ReSTExtensionTestCase):
""" Listing test case and CodeBlock alias tests """
deps = None
sample1 = '.. listing:: nikola.py python\n\n'
sample2 = '.. code-block:: python\n\n import antigravity'
sample3 = '.. sourcecode:: python\n\n import antigravity'
# def test_listing(self):
# """ Test that we can render a file object contents without errors """
# with cd(os.path.dirname(__file__)):
# self.deps = 'listings/nikola.py'
# self.setHtmlFromRst(self.sample1)
def test_codeblock_alias(self):
""" Test CodeBlock aliases """
self.deps = None
self.setHtmlFromRst(self.sample2)
self.setHtmlFromRst(self.sample3)
class DocTestCase(ReSTExtensionTestCase):
""" Ref role test case """
sample = 'Sample for testing my :doc:`doesnt-exist-post`'
sample1 = 'Sample for testing my :doc:`fake-post`'
sample2 = 'Sample for testing my :doc:`titled post <fake-post>`'
def setUp(self):
# Initialize plugin, register role
self.plugin = DocPlugin()
self.plugin.set_site(FakeSite())
# Hack to fix leaked state from integration tests
try:
f = docutils.parsers.rst.roles.role('doc', None, None, None)[0]
f.site = FakeSite()
except AttributeError:
pass
return super(DocTestCase, self).setUp()
def test_doc_doesnt_exist(self):
self.assertRaises(Exception, self.assertHTMLContains, 'anything', {})
def test_doc(self):
self.setHtmlFromRst(self.sample1)
self.assertHTMLContains('a',
text='Fake post',
attributes={'href': '/posts/fake-post'})
def test_doc_titled(self):
self.setHtmlFromRst(self.sample2)
self.assertHTMLContains('a',
text='titled post',
attributes={'href': '/posts/fake-post'})
if __name__ == "__main__":
unittest.main()
| 35.529605
| 79
| 0.612351
|
3efccdbaba59e551984c10ea06ec55e1f47aaf75
| 1,008
|
py
|
Python
|
resources/store.py
|
mahmoudtokura/flask-rest-api
|
c979286e8a752aa56f841bbe9eafdf6052c4479e
|
[
"MIT"
] | null | null | null |
resources/store.py
|
mahmoudtokura/flask-rest-api
|
c979286e8a752aa56f841bbe9eafdf6052c4479e
|
[
"MIT"
] | null | null | null |
resources/store.py
|
mahmoudtokura/flask-rest-api
|
c979286e8a752aa56f841bbe9eafdf6052c4479e
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource
from models.storemodel import StoreModel
class Store(Resource):
def get(self, name):
store = StoreModel.find_by_name(name)
if store:
return store.json()
else:
return {"message": "Store not found"}, 404
def post(self, name):
store = StoreModel.find_by_name(name)
if store:
return {"message": "Store name taken"}, 400
store = StoreModel(name=name)
try:
store.save_to_db()
except:
return {"message": "Error creating store"}, 500
return store.json(), 201
def delete(self, name):
store = StoreModel.find_by_name(name)
if store:
store.delete_from_db()
return {"message": "Store deleted"}
return {"message": "Store does not exist"}
class StoreList(Resource):
def get(self):
stores = StoreModel.query.all()
return {"Stores": [store.json() for store in stores]}
| 26.526316
| 61
| 0.584325
|
dbfea6a896d55680ab5c5ea1e5b8d093ffe1ba42
| 9,442
|
py
|
Python
|
pajbot/modules/predict.py
|
gigglearrows/anniesbot
|
fb9fb92b827c6c78efebb415f10d015216fb3ba2
|
[
"MIT"
] | null | null | null |
pajbot/modules/predict.py
|
gigglearrows/anniesbot
|
fb9fb92b827c6c78efebb415f10d015216fb3ba2
|
[
"MIT"
] | 1
|
2015-12-24T02:01:21.000Z
|
2018-02-19T01:08:16.000Z
|
pajbot/modules/predict.py
|
gigglearrows/anniesbot
|
fb9fb92b827c6c78efebb415f10d015216fb3ba2
|
[
"MIT"
] | null | null | null |
import logging
import datetime
from pajbot.modules import BaseModule
from pajbot.models.db import DBManager, Base
from pajbot.models.command import Command, CommandExample
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.orm import relationship, joinedload, backref
from sqlalchemy import Column, Integer, Boolean, DateTime, ForeignKey, String
from sqlalchemy.dialects.mysql import TEXT
log = logging.getLogger(__name__)
class PredictionRun(Base):
__tablename__ = 'tb_prediction_run'
id = Column(Integer, primary_key=True)
winner_id = Column(Integer, nullable=True)
started = Column(DateTime, nullable=False)
ended = Column(DateTime, nullable=True)
open = Column(Boolean,
nullable=False,
default=True,
server_default=sqlalchemy.sql.expression.true())
def __init__(self):
self.id = None
self.winner_id = None
self.started = datetime.datetime.now()
self.ended = None
class PredictionRunEntry(Base):
__tablename__ = 'tb_prediction_run_entry'
id = Column(Integer, primary_key=True)
prediction_run_id = Column(Integer, ForeignKey('tb_prediction_run.id'), nullable=False)
user_id = Column(Integer, nullable=False)
prediction = Column(Integer, nullable=False)
user = relationship('User',
cascade='',
uselist=False,
lazy='noload',
foreign_keys='User.id',
primaryjoin='User.id==PredictionRunEntry.user_id')
def __init__(self, prediction_run_id, user_id, prediction):
self.id = None
self.prediction_run_id = prediction_run_id
self.user_id = user_id
self.prediction = prediction
class PredictModule(BaseModule):
ID = __name__.split('.')[-1]
NAME = 'Prediction module'
DESCRIPTION = 'Handles predictions of arena wins'
def load_commands(self, **options):
self.commands['predict'] = Command.raw_command(self.predict,
delay_all=0,
delay_user=10,
sub_only=True,
can_execute_with_whisper=True,
description='Predict how many wins will occur in the Arena challenge')
self.commands['newpredict'] = Command.raw_command(self.new_predict,
delay_all=10,
delay_user=10,
description='Starts a new 100in10 arena run',
level=750)
self.commands['endpredict'] = Command.raw_command(self.end_predict,
delay_all=10,
delay_user=10,
description='Ends a 100in10 arena run',
level=750)
self.commands['closepredict'] = Command.raw_command(self.close_predict,
delay_all=10,
delay_user=10,
description='Close submissions to the latest 100in10 arena run',
level=750)
def predict(self, **options):
bot = options['bot']
message = options['message']
source = options['source']
if source.id is None:
            log.warning('Source ID is NONE, attempting to salvage by committing users to the database.')
bot.users.commit()
log.info('New ID is: {}'.format(source.id))
prediction_number = None
        if message is None or len(message) == 0:
# bot.whisper(source.username, 'Missing argument to !predict command. Usage: !predict 69 where 69 is a number between 0 and 120 (inclusive).')
bot.say('{}, Missing argument to !predict command. Usage: !predict 69 where 69 is a number between 0 and 120 (inclusive).'.format(source.username_raw))
return True
try:
prediction_number = int(message.split(' ')[0])
except (KeyError, ValueError):
# bot.whisper(source.username, 'Invalid argument to !predict command. Usage: !predict 69 where 69 is a number between 0 and 120 (inclusive).')
bot.say('{}, Invalid argument to !predict command. Usage: !predict 69 where 69 is a number between 0 and 120 (inclusive).'.format(source.username_raw))
return True
if prediction_number < 0 or prediction_number > 120:
# bot.whisper(source.username, 'Invalid argument to !predict command. The prediction must be a value between 0 and 120 (inclusive).')
bot.say('{}, Invalid argument to !predict command. Usage: !predict 69 where 69 is a number between 0 and 120 (inclusive).'.format(source.username_raw))
return True
with DBManager.create_session_scope() as db_session:
# Get the current open prediction
current_prediction_run = db_session.query(PredictionRun).filter_by(ended=None, open=True).one_or_none()
if current_prediction_run is None:
# bot.whisper(source.username, 'There is no arena run active that accepts predictions right now.')
bot.say('{}, There is no arena run active that accepts predictions right now.'.format(source.username_raw))
return True
user_entry = db_session.query(PredictionRunEntry).filter_by(prediction_run_id=current_prediction_run.id, user_id=source.id).one_or_none()
if user_entry is not None:
old_prediction_num = user_entry.prediction
user_entry.prediction = prediction_number
# bot.whisper(source.username, 'Updated your prediction for run {} from {} to {}'.format(
# current_prediction_run.id, old_prediction_num, prediction_number))
bot.say('{}, Updated your prediction for run {} from {} to {}'.format(
source.username_raw, current_prediction_run.id, old_prediction_num, prediction_number))
else:
user_entry = PredictionRunEntry(current_prediction_run.id, source.id, prediction_number)
db_session.add(user_entry)
# bot.whisper(source.username, 'Your prediction for {} wins in run {} has been submitted.'.format(
# prediction_number, current_prediction_run.id))
bot.say('{}, Your prediction for {} wins in run {} has been submitted.'.format(
source.username_raw, prediction_number, current_prediction_run.id))
def new_predict(self, **options):
bot = options['bot']
source = options['source']
with DBManager.create_session_scope() as db_session:
# Check if there is already an open prediction
current_prediction_run = db_session.query(PredictionRun).filter_by(ended=None, open=True).one_or_none()
if current_prediction_run is not None:
# bot.whisper(source.username, 'There is already a prediction run accepting submissions, use !closepredict to close submissions for that prediction.')
bot.say('{}, There is already a prediction run accepting submissions, use !closepredict to close submissions for that prediction.'.format(source.username_raw))
return True
new_prediction_run = PredictionRun()
db_session.add(new_prediction_run)
db_session.commit()
# bot.whisper(source.username, 'A new prediction run has been started, and is now accepting submissions. Prediction run ID: {}'.format(new_prediction_run.id))
bot.say('A new prediction run has been started, and is now accepting submissions. Prediction run ID: {}'.format(new_prediction_run.id))
def end_predict(self, **options):
bot = options['bot']
source = options['source']
with DBManager.create_session_scope() as db_session:
# Check if there is a non-ended, but closed prediction run we can end
current_prediction_run = db_session.query(PredictionRun).filter_by(ended=None, open=False).one_or_none()
if current_prediction_run is None:
# bot.whisper(source.username, 'There is no closed prediction runs we can end right now.')
bot.say('{}, There is no closed prediction runs we can end right now.'.format(source.username_raw))
return True
current_prediction_run.ended = datetime.datetime.now()
# bot.whisper(source.username, 'Prediction run with ID {} has been closed.'.format(current_prediction_run.id))
bot.say('Prediction run with ID {} has been closed.'.format(current_prediction_run.id))
def close_predict(self, **options):
bot = options['bot']
source = options['source']
with DBManager.create_session_scope() as db_session:
# Check if there is a non-ended, but closed prediction run we can end
current_prediction_run = db_session.query(PredictionRun).filter_by(ended=None, open=True).one_or_none()
if current_prediction_run is None:
# bot.whisper(source.username, 'There is no open prediction runs we can close right now.')
bot.say('{}, There is no open prediction runs we can close right now.'.format(source.username_raw))
return True
current_prediction_run.open = False
# bot.whisper(source.username, 'Predictions are no longer accepted for prediction run {}'.format(current_prediction_run.id))
bot.say('{}, Predictions are no longer accepted for prediction run {}'.format(source.username_raw, current_prediction_run.id))
| 50.491979
| 175
| 0.654522
|
9195f5e986eb857cdf9a95aa4ae87e1fd72afe7d
| 1,127
|
py
|
Python
|
galaxy_utils/sequence/scripts/fastq_paired_end_splitter.py
|
galaxyproject/sequence_utils
|
f7e8cd163d27cb6c16a86ae63e5912ffe32e92ba
|
[
"CC-BY-3.0"
] | 5
|
2015-10-31T11:28:50.000Z
|
2020-09-08T20:13:48.000Z
|
galaxy_utils/sequence/scripts/fastq_paired_end_splitter.py
|
galaxyproject/sequence_utils
|
f7e8cd163d27cb6c16a86ae63e5912ffe32e92ba
|
[
"CC-BY-3.0"
] | 22
|
2015-12-09T00:13:48.000Z
|
2020-02-18T12:25:38.000Z
|
galaxy_utils/sequence/scripts/fastq_paired_end_splitter.py
|
galaxyproject/sequence_utils
|
f7e8cd163d27cb6c16a86ae63e5912ffe32e92ba
|
[
"CC-BY-3.0"
] | 8
|
2015-10-21T13:22:18.000Z
|
2020-02-07T09:54:00.000Z
|
# Dan Blankenberg
import sys
from galaxy_utils.sequence.fastq import (
fastqReader,
fastqSplitter,
fastqWriter,
)
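# Expected positional arguments (read from sys.argv in main() below):
#   fastq_paired_end_splitter.py <input.fastq> <quality format, e.g. sanger> \
#       <output_1.fastq> <output_2.fastq>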
def main():
# Read command line arguments
input_filename = sys.argv[1]
input_type = sys.argv[2] or 'sanger'
output1_filename = sys.argv[3]
output2_filename = sys.argv[4]
splitter = fastqSplitter()
i = None
skip_count = 0
writer1 = fastqWriter(path=output1_filename, format=input_type)
writer2 = fastqWriter(path=output2_filename, format=input_type)
reader = fastqReader(path=input_filename, format=input_type)
with writer1, writer2, reader:
for i, fastq_read in enumerate(reader):
read1, read2 = splitter.split(fastq_read)
if read1 and read2:
writer1.write(read1)
writer2.write(read2)
else:
skip_count += 1
if i is None:
print("Your file contains no valid FASTQ reads.")
else:
print(f'Split {i - skip_count + 1} of {i + 1} reads ({float(i - skip_count + 1) / float(i + 1) * 100.0:.2f}%).')
if __name__ == "__main__":
main()
| 26.209302
| 120
| 0.62378
|
3791acafa95695c51dff40a3519b9d86b1e08a8f
| 1,909
|
py
|
Python
|
benchmark/startPyquil2796.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2796.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2796.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=23
prog += RX(-0.6848671984825748,1) # number=26
prog += CZ(0,3) # number=24
prog += H(3) # number=25
prog += H(3) # number=37
prog += CZ(0,3) # number=38
prog += H(3) # number=39
prog += CNOT(0,3) # number=30
prog += CNOT(0,3) # number=40
prog += X(3) # number=41
prog += CNOT(0,3) # number=42
prog += CNOT(0,3) # number=32
prog += H(3) # number=33
prog += CZ(0,3) # number=34
prog += H(3) # number=35
prog += CNOT(0,3) # number=15
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += CNOT(3,0) # number=20
prog += Z(3) # number=21
prog += H(0) # number=27
prog += CZ(3,0) # number=28
prog += H(0) # number=29
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += H(1) # number=36
prog += Y(2) # number=11
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2796.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| 25.453333
| 64
| 0.563646
|
17772207bdc95492a90498ea50aa17ce67006318
| 185
|
py
|
Python
|
sentimental_analysis/audio/test_AudioAnalyzer.py
|
se20z09/p3_celt
|
44925fad94d412d1e48d0319d793e7a5b5b34288
|
[
"MIT"
] | null | null | null |
sentimental_analysis/audio/test_AudioAnalyzer.py
|
se20z09/p3_celt
|
44925fad94d412d1e48d0319d793e7a5b5b34288
|
[
"MIT"
] | 19
|
2020-09-28T21:14:08.000Z
|
2020-10-27T20:20:33.000Z
|
sentimental_analysis/audio/test_AudioAnalyzer.py
|
se20z09/p3_celt
|
44925fad94d412d1e48d0319d793e7a5b5b34288
|
[
"MIT"
] | 10
|
2020-09-29T21:52:36.000Z
|
2021-09-29T01:02:11.000Z
|
from pydoc import text
# NOTE: these are incomplete test stubs; `self`, `score`, and `sentence` are not
# defined in this module and would need to come from the real AudioAnalyzer fixtures.
def speech_to_text(self, filename):
    return text
def sentiment_analyzer_scores(analyser=None):
    assert score == analyser.polarity_scores(sentence)
| 20.555556
| 53
| 0.756757
|
fee70c77082862f18b222bc0c6a587726ee059c9
| 5,342
|
py
|
Python
|
image_classification/ConvMLP/config.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | 1
|
2022-01-12T00:46:53.000Z
|
2022-01-12T00:46:53.000Z
|
image_classification/ConvMLP/config.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | null | null | null |
image_classification/ConvMLP/config.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration
Configuration for data, model architecture, training, etc.
Config can be set by a .yaml file or by argparser (limited usage)
"""
import os
from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.BASE = ['']
# data settings
_C.DATA = CN()
_C.DATA.BATCH_SIZE = 8 #1024 batch_size for single GPU
_C.DATA.BATCH_SIZE_EVAL = 8 #1024 batch_size for single GPU
_C.DATA.DATA_PATH = '/dataset/imagenet/' # path to dataset
_C.DATA.DATASET = 'imagenet2012' # dataset name
_C.DATA.IMAGE_SIZE = 224 # input image size
_C.DATA.CROP_PCT = 0.9 # input image scale ratio, scale is applied before centercrop in eval mode
_C.DATA.NUM_WORKERS = 4 # number of data loading threads
_C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # [0.5, 0.5, 0.5]
_C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # [0.5, 0.5, 0.5]
# model settings
_C.MODEL = CN()
_C.MODEL.TYPE = 'ConvMLP'
_C.MODEL.NAME = 'ConvMLP'
_C.MODEL.RESUME = None
_C.MODEL.PRETRAINED = None
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.DROPOUT = 0.0
_C.MODEL.ATTENTION_DROPOUT = 0.0
_C.MODEL.DROP_PATH = 0.1
# transformer settings
_C.MODEL.MIXER = CN()
_C.MODEL.MIXER.BLOCKS = [2, 4, 2]
_C.MODEL.MIXER.DIMS = [128, 256, 512]
_C.MODEL.MIXER.MLP_RATIOS = [2, 2, 2]
_C.MODEL.MIXER.CHANNELS = 64
_C.MODEL.MIXER.N_CONV_BLOCKS = 2
# training settings
_C.TRAIN = CN()
_C.TRAIN.LAST_EPOCH = 0
_C.TRAIN.NUM_EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 0.001
_C.TRAIN.WARMUP_START_LR = 5e-7
_C.TRAIN.END_LR = 5e-6
_C.TRAIN.GRAD_CLIP = 5.0
_C.TRAIN.ACCUM_ITER = 1
_C.TRAIN.LINEAR_SCALED_LR = None
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'warmupcosine'
_C.TRAIN.LR_SCHEDULER.MILESTONES = "30, 60, 90" # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 # only used in StepLRScheduler
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'AdamW'
_C.TRAIN.OPTIMIZER.EPS = 1e-8
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) # for adamW
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# train augmentation
_C.TRAIN.MIXUP_ALPHA = 0.8
_C.TRAIN.CUTMIX_ALPHA = 1.0
_C.TRAIN.CUTMIX_MINMAX = None
_C.TRAIN.MIXUP_PROB = 1.0
_C.TRAIN.MIXUP_SWITCH_PROB = 0.5
_C.TRAIN.MIXUP_MODE = 'batch'
_C.TRAIN.SMOOTHING = 0.1
_C.TRAIN.COLOR_JITTER = 0.4
_C.TRAIN.AUTO_AUGMENT = False #'rand-m9-mstd0.5-inc1'
_C.TRAIN.RAND_AUGMENT = False
_C.TRAIN.RANDOM_ERASE_PROB = 0.25
_C.TRAIN.RANDOM_ERASE_MODE = 'pixel'
_C.TRAIN.RANDOM_ERASE_COUNT = 1
_C.TRAIN.RANDOM_ERASE_SPLIT = False
# misc
_C.SAVE = "./output"
_C.TAG = "default"
_C.SAVE_FREQ = 1 # freq to save chpt
_C.REPORT_FREQ = 50 # freq to logging info
_C.VALIDATE_FREQ = 20 # freq to do validation
_C.SEED = 0
_C.EVAL = False # run evaluation only
_C.AMP = False # mix precision training
_C.LOCAL_RANK = 0
_C.NGPUS = -1
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as infile:
yaml_cfg = yaml.load(infile, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('merging config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
"""Update config by ArgumentParser
Args:
args: ArgumentParser contains options
Return:
config: updated config
"""
if args.cfg:
_update_config_from_file(config, args.cfg)
config.defrost()
if args.dataset:
config.DATA.DATASET = args.dataset
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.image_size:
config.DATA.IMAGE_SIZE = args.image_size
if args.num_classes:
config.MODEL.NUM_CLASSES = args.num_classes
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.output is not None:
config.SAVE = args.output
if args.ngpus:
config.NGPUS = args.ngpus
if args.eval:
config.EVAL = True
config.DATA.BATCH_SIZE_EVAL = args.batch_size
if args.pretrained:
config.MODEL.PRETRAINED = args.pretrained
if args.resume:
config.MODEL.RESUME = args.resume
if args.last_epoch:
config.TRAIN.LAST_EPOCH = args.last_epoch
if args.amp: # only during training
if config.EVAL is True:
config.AMP = False
else:
config.AMP = True
#config.freeze()
return config
def get_config(cfg_file=None):
"""Return a clone of config or load from yaml file"""
config = _C.clone()
if cfg_file:
_update_config_from_file(config, cfg_file)
return config
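# Hedged usage sketch (not part of the original module): update_config expects an
# argparse-style namespace exposing every option it reads, so a minimal stand-in
# can be built with SimpleNamespace; all values below are illustrative defaults.
if __name__ == '__main__':
    from types import SimpleNamespace
    demo_args = SimpleNamespace(
        cfg=None, dataset=None, batch_size=None, image_size=None,
        num_classes=None, data_path=None, output=None, ngpus=None,
        eval=False, pretrained=None, resume=None, last_epoch=None, amp=False)
    demo_config = update_config(get_config(), demo_args)
    print(demo_config.MODEL.TYPE, demo_config.DATA.BATCH_SIZE)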
| 29.843575
| 97
| 0.703482
|
a650e241562c87d9e77c8865c8cb037b1b4fe1be
| 242
|
py
|
Python
|
python-upbit/basic/unit12/01.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | 9
|
2020-10-25T15:13:32.000Z
|
2022-03-26T11:27:21.000Z
|
python-upbit/basic/unit12/01.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | null | null | null |
python-upbit/basic/unit12/01.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | 7
|
2021-03-01T11:06:45.000Z
|
2022-03-14T07:06:04.000Z
|
import pyupbit
import pprint
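# upbit.txt is assumed to hold the access key on line 1 and the secret key on
# line 2, matching the two reads below.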
f = open("upbit.txt")
lines = f.readlines()
access = lines[0].strip()
secret = lines[1].strip()
f.close()
upbit = pyupbit.Upbit(access, secret)
resp = upbit.buy_market_order("KRW-XRP", 10000)
pprint.pprint(resp)
| 20.166667
| 47
| 0.719008
|
2b2eac080b6ec01006676a63defb95793630017f
| 38,582
|
py
|
Python
|
src/robot/libraries/Process.py
|
ncbi/robotframework
|
f5426d54f92dfa884615cea4eae08023b23a1b2e
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2016-01-13T13:48:28.000Z
|
2021-09-05T07:14:07.000Z
|
src/robot/libraries/Process.py
|
ncbi/robotframework
|
f5426d54f92dfa884615cea4eae08023b23a1b2e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/libraries/Process.py
|
ncbi/robotframework
|
f5426d54f92dfa884615cea4eae08023b23a1b2e
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2018-01-18T22:00:17.000Z
|
2018-07-26T10:53:00.000Z
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import ctypes
import os
import subprocess
import sys
import time
import signal as signal_module
from robot.utils import (ConnectionCache, abspath, encode_to_system,
decode_output, secs_to_timestr, timestr_to_secs)
from robot.version import get_version
from robot.api import logger
if os.sep == '/' and sys.platform.startswith('java'):
encode_to_system = lambda string: unicode(string)
class Process(object):
"""Robot Framework test library for running processes.
This library utilizes Python's
[http://docs.python.org/2/library/subprocess.html|subprocess]
module and its
[http://docs.python.org/2/library/subprocess.html#subprocess.Popen|Popen]
class.
    The library has the following main usages:
    - Running processes in the system and waiting for their completion using
      `Run Process` keyword.
    - Starting processes in the background using `Start Process`.
    - Waiting for started processes to complete using `Wait For Process` or
      stopping them with `Terminate Process` or `Terminate All Processes`.
This library is new in Robot Framework 2.8.
== Table of contents ==
- `Specifying command and arguments`
- `Process configuration`
- `Active process`
- `Result object`
- `Boolean arguments`
- `Using with OperatingSystem library`
- `Example`
- `Shortcuts`
- `Keywords`
= Specifying command and arguments =
Both `Run Process` and `Start Process` accept the command to execute
and all arguments passed to it as separate arguments. This is convenient
to use and also allows these keywords to automatically escape possible
spaces and other special characters in the command or arguments.
When `running processes in shell`, it is also possible to give the
whole command to execute as a single string. The command can then
contain multiple commands, for example, connected with pipes. When
    using this approach the caller is responsible for escaping.
Examples:
| `Run Process` | ${progdir}${/}prog.py | first arg | second |
| `Run Process` | script1.sh arg && script2.sh | shell=yes | cwd=${progdir} |
Starting from Robot Framework 2.8.6, possible non-string arguments are
converted to strings automatically.
= Process configuration =
`Run Process` and `Start Process` keywords can be configured using
optional `**configuration` keyword arguments. Configuration arguments
must be given after other arguments passed to these keywords and must
use syntax like `name=value`. Available configuration arguments are
listed below and discussed further in sections afterwards.
| = Name = | = Explanation = |
| shell | Specifies whether to run the command in shell or not |
| cwd | Specifies the working directory. |
| env | Specifies environment variables given to the process. |
| env:<name> | Overrides the named environment variable(s) only. |
| stdout | Path of a file where to write standard output. |
| stderr | Path of a file where to write standard error. |
| alias | Alias given to the process. |
Note that because `**configuration` is passed using `name=value` syntax,
possible equal signs in other arguments passed to `Run Process` and
`Start Process` must be escaped with a backslash like `name\\=value`.
See `Run Process` for an example.
== Running processes in shell ==
The `shell` argument specifies whether to run the process in a shell or
not. By default shell is not used, which means that shell specific
commands, like `copy` and `dir` on Windows, are not available.
Giving the `shell` argument any non-false value, such as `shell=True`,
changes the program to be executed in a shell. It allows using the shell
capabilities, but can also make the process invocation operating system
dependent.
When using a shell it is possible to give the whole command to execute
as a single string. See `Specifying command and arguments` section for
examples and more details in general.
== Current working directory ==
By default the child process will be executed in the same directory
as the parent process, the process running tests, is executed. This
can be changed by giving an alternative location using the `cwd` argument.
Forward slashes in the given path are automatically converted to
backslashes on Windows.
`Standard output and error streams`, when redirected to files,
are also relative to the current working directory possibly set using
the `cwd` argument.
Example:
| `Run Process` | prog.exe | cwd=${ROOT}/directory | stdout=stdout.txt |
== Environment variables ==
By default the child process will get a copy of the parent process's
environment variables. The `env` argument can be used to give the
child a custom environment as a Python dictionary. If there is a need
    to specify only certain environment variables, it is possible to use the
    `env:<name>=<value>` format to set or override only those named variables.
It is also possible to use these two approaches together.
Examples:
| `Run Process` | program | env=${environ} |
| `Run Process` | program | env:http_proxy=10.144.1.10:8080 | env:PATH=%{PATH}${:}${PROGDIR} |
| `Run Process` | program | env=${environ} | env:EXTRA=value |
== Standard output and error streams ==
By default processes are run so that their standard output and standard
error streams are kept in the memory. This works fine normally,
but if there is a lot of output, the output buffers may get full and
the program can hang.
To avoid output buffers getting full, it is possible to use `stdout`
and `stderr` arguments to specify files on the file system where to
redirect the outputs. This can also be useful if other processes or
other keywords need to read or manipulate the outputs somehow.
Given `stdout` and `stderr` paths are relative to the `current working
directory`. Forward slashes in the given paths are automatically converted
to backslashes on Windows.
As a special feature, it is possible to redirect the standard error to
the standard output by using `stderr=STDOUT`.
Regardless are outputs redirected to files or not, they are accessible
through the `result object` returned when the process ends.
Examples:
| ${result} = | `Run Process` | program | stdout=${TEMPDIR}/stdout.txt | stderr=${TEMPDIR}/stderr.txt |
| `Log Many` | stdout: ${result.stdout} | stderr: ${result.stderr} |
| ${result} = | `Run Process` | program | stderr=STDOUT |
| `Log` | all output: ${result.stdout} |
Note that the created output files are not automatically removed after
the test run. The user is responsible to remove them if needed.
== Alias ==
A custom name given to the process that can be used when selecting the
`active process`.
Examples:
| `Start Process` | program | alias=example |
| `Run Process` | python | -c | print 'hello' | alias=hello |
= Active process =
    The test library keeps a record of which started process is currently
    active. By default it is the latest process started with `Start Process`,
but `Switch Process` can be used to select a different one. Using
`Run Process` does not affect the active process.
The keywords that operate on started processes will use the active process
by default, but it is possible to explicitly select a different process
using the `handle` argument. The handle can be the identifier returned by
`Start Process` or an `alias` explicitly given to `Start Process` or
`Run Process`.
= Result object =
`Run Process`, `Wait For Process` and `Terminate Process` keywords return a
result object that contains information about the process execution as its
attributes. The same result object, or some of its attributes, can also
    be retrieved using the `Get Process Result` keyword. Attributes available in the
object are documented in the table below.
| = Attribute = | = Explanation = |
| rc | Return code of the process as an integer. |
| stdout | Contents of the standard output stream. |
| stderr | Contents of the standard error stream. |
| stdout_path | Path where stdout was redirected or `None` if not redirected. |
| stderr_path | Path where stderr was redirected or `None` if not redirected. |
Example:
| ${result} = | `Run Process` | program |
| `Should Be Equal As Integers` | ${result.rc} | 0 |
| `Should Match` | ${result.stdout} | Some t?xt* |
| `Should Be Empty` | ${result.stderr} | |
| ${stdout} = | `Get File` | ${result.stdout_path} |
| `Should Be Equal` | ${stdout} | ${result.stdout} |
| `File Should Be Empty` | ${result.stderr_path} | |
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values.
If such an argument is given as a string, it is considered false if it
is either empty or case-insensitively equal to `false`. Other strings
are considered true regardless what they contain, and other argument
types are tested using same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
as in Python].
True examples:
| `Terminate Process` | kill=True | # Strings are generally true. |
| `Terminate Process` | kill=yes | # Same as above. |
| `Terminate Process` | kill=${TRUE} | # Python `True` is true. |
| `Terminate Process` | kill=${42} | # Numbers other than 0 are true. |
False examples:
| `Terminate Process` | kill=False | # String `False` is false. |
| `Terminate Process` | kill=${EMPTY} | # Empty string is false. |
| `Terminate Process` | kill=${FALSE} | # Python `False` is false. |
| `Terminate Process` | kill=${0} | # Number 0 is false. |
Note that prior to Robot Framework 2.8 all non-empty strings, including
`false`, were considered true.
= Using with OperatingSystem library =
The OperatingSystem library also contains keywords for running processes.
They are not as flexible as the keywords provided by this library, and
thus not recommended to be used anymore. They may eventually even be
deprecated.
There is a name collision because both of these libraries have
`Start Process` and `Switch Process` keywords. This is handled so that
if both libraries are imported, the keywords in the Process library are
used by default. If there is a need to use the OperatingSystem variants,
it is possible to use `OperatingSystem.Start Process` syntax or use
the `BuiltIn` keyword `Set Library Search Order` to change the priority.
Other keywords in the OperatingSystem library can be used freely with
keywords in the Process library.
= Example =
| ***** Settings *****
| Library Process
| Suite Teardown `Terminate All Processes` kill=True
|
| ***** Test Cases *****
| Example
| `Start Process` program arg1 arg2 alias=First
| ${handle} = `Start Process` command.sh arg | command2.sh shell=True cwd=/path
| ${result} = `Run Process` ${CURDIR}/script.py
| `Should Not Contain` ${result.stdout} FAIL
| `Terminate Process` ${handle}
| ${result} = `Wait For Process` First
| `Should Be Equal As Integers` ${result.rc} 0
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
TERMINATE_TIMEOUT = 30
KILL_TIMEOUT = 10
def __init__(self):
self._processes = ConnectionCache('No active process.')
self._results = {}
def run_process(self, command, *arguments, **configuration):
"""Runs a process and waits for it to complete.
`command` and `*arguments` specify the command to execute and arguments
passed to it. See `Specifying command and arguments` for more details.
`**configuration` contains additional configuration related to starting
processes and waiting for them to finish. See `Process configuration`
for more details about configuration related to starting processes.
Configuration related to waiting for processes consists of `timeout`
and `on_timeout` arguments that have same semantics as with `Wait
For Process` keyword. By default there is no timeout, and if timeout
is defined the default action on timeout is `terminate`.
Returns a `result object` containing information about the execution.
Note that possible equal signs in `*arguments` must be escaped
with a backslash (e.g. `name\\=value`) to avoid them to be passed in
as `**configuration`.
Examples:
| ${result} = | Run Process | python | -c | print 'Hello, world!' |
| Should Be Equal | ${result.stdout} | Hello, world! |
| ${result} = | Run Process | ${command} | stderr=STDOUT | timeout=10s |
| ${result} = | Run Process | ${command} | timeout=1min | on_timeout=continue |
| ${result} = | Run Process | java -Dname\\=value Example | shell=True | cwd=${EXAMPLE} |
This command does not change the `active process`.
`timeout` and `on_timeout` arguments are new in Robot Framework 2.8.4.
"""
current = self._processes.current
timeout = configuration.pop('timeout', None)
on_timeout = configuration.pop('on_timeout', 'terminate')
try:
handle = self.start_process(command, *arguments, **configuration)
return self.wait_for_process(handle, timeout, on_timeout)
finally:
self._processes.current = current
def start_process(self, command, *arguments, **configuration):
"""Starts a new process on background.
See `Specifying command and arguments` and `Process configuration`
for more information about the arguments, and `Run Process` keyword
for related examples.
Makes the started process the new `active process`. Returns an identifier
that can be used as a handle to activate the started process later if needed.
Starting from Robot Framework 2.8.5, processes are started so that
they create a new process group. This allows sending signals to, and
terminating, possible child processes as well.
"""
config = ProcessConfig(**configuration)
executable_command = self._cmd(command, arguments, config.shell)
logger.info('Starting process:\n%s' % executable_command)
logger.debug('Process configuration:\n%s' % config)
process = subprocess.Popen(executable_command, **config.full_config)
self._results[process] = ExecutionResult(process,
config.stdout_stream,
config.stderr_stream)
return self._processes.register(process, alias=config.alias)
def _cmd(self, command, args, use_shell):
command = [encode_to_system(item) for item in [command] + list(args)]
if not use_shell:
return command
if args:
return subprocess.list2cmdline(command)
return command[0]
def is_process_running(self, handle=None):
"""Checks is the process running or not.
If `handle` is not given, uses the current `active process`.
Returns `True` if the process is still running and `False` otherwise.
"""
return self._processes[handle].poll() is None
def process_should_be_running(self, handle=None,
error_message='Process is not running.'):
"""Verifies that the process is running.
If `handle` is not given, uses the current `active process`.
Fails if the process has stopped.
"""
if not self.is_process_running(handle):
raise AssertionError(error_message)
def process_should_be_stopped(self, handle=None,
error_message='Process is running.'):
"""Verifies that the process is not running.
If `handle` is not given, uses the current `active process`.
Fails if the process is still running.
"""
if self.is_process_running(handle):
raise AssertionError(error_message)
def wait_for_process(self, handle=None, timeout=None, on_timeout='continue'):
"""Waits for the process to complete or to reach the given timeout.
The process to wait for must have been started earlier with
`Start Process`. If `handle` is not given, uses the current
`active process`.
`timeout` defines the maximum time to wait for the process. It is
interpreted according to Robot Framework User Guide Appendix
`Time Format`, for example, '42', '42 s', or '1 minute 30 seconds'.
`on_timeout` defines what to do if the timeout occurs. Possible values
and corresponding actions are explained in the table below. Notice
that reaching the timeout never fails the test.
| = Value = | = Action = |
| `continue` | The process is left running (default). |
| `terminate` | The process is gracefully terminated. |
| `kill` | The process is forcefully stopped. |
See the `Terminate Process` keyword for more details on how processes are
terminated and killed.
If the process ends before the timeout or it is terminated or killed,
this keyword returns a `result object` containing information about
the execution. If the process is left running, Python `None` is
returned instead.
Examples:
| # Process ends cleanly | | |
| ${result} = | Wait For Process | example |
| Process Should Be Stopped | example | |
| Should Be Equal As Integers | ${result.rc} | 0 |
| # Process does not end | | |
| ${result} = | Wait For Process | timeout=42 secs |
| Process Should Be Running | | |
| Should Be Equal | ${result} | ${NONE} |
| # Kill non-ending process | | |
| ${result} = | Wait For Process | timeout=1min 30s | on_timeout=kill |
| Process Should Be Stopped | | |
| Should Be Equal As Integers | ${result.rc} | -9 |
`timeout` and `on_timeout` are new in Robot Framework 2.8.2.
"""
process = self._processes[handle]
logger.info('Waiting for process to complete.')
if timeout:
timeout = timestr_to_secs(timeout)
if not self._process_is_stopped(process, timeout):
logger.info('Process did not complete in %s.'
% secs_to_timestr(timeout))
return self._manage_process_timeout(handle, on_timeout.lower())
return self._wait(process)
def _manage_process_timeout(self, handle, on_timeout):
if on_timeout == 'terminate':
return self.terminate_process(handle)
elif on_timeout == 'kill':
return self.terminate_process(handle, kill=True)
else:
logger.info('Leaving process intact.')
return None
def _wait(self, process):
result = self._results[process]
result.rc = process.wait() or 0
result.close_streams()
logger.info('Process completed.')
return result
def terminate_process(self, handle=None, kill=False):
"""Stops the process gracefully or forcefully.
If `handle` is not given, uses the current `active process`.
Waits for the process to stop after terminating it. Returns
a `result object` containing information about the execution
similarly as `Wait For Process`.
On Unix-like machines, by default, first tries to terminate the process
group gracefully, but forcefully kills it if it does not stop in 30
seconds. Kills the process group immediately if the `kill` argument is
given any value considered true. See `Boolean arguments` section for
more details about true and false values.
Termination is done using `TERM (15)` signal and killing using
`KILL (9)`. Use `Send Signal To Process` instead if you just want to
send either of these signals without waiting for the process to stop.
On Windows, by default, sends `CTRL_BREAK_EVENT` signal to the process
group. If that does not stop the process in 30 seconds, or `kill`
argument is given a true value, uses Win32 API function
`TerminateProcess()` to kill the process forcefully. Note that
`TerminateProcess()` does not kill possible child processes.
| ${result} = | Terminate Process | |
| Should Be Equal As Integers | ${result.rc} | -15 | # On Unixes |
| Terminate Process | myproc | kill=true |
*NOTE:* Stopping processes requires the
[http://docs.python.org/2/library/subprocess.html|subprocess]
module to have working `terminate` and `kill` functions. They were
added in Python 2.6 and are thus missing from earlier versions.
Unfortunately at least beta releases of Jython 2.7
[http://bugs.jython.org/issue1898|do not seem to support them either].
Automatically killing the process if termination fails as well as
returning a result object are new features in Robot Framework 2.8.2.
Also terminating possible child processes, including using
`CTRL_BREAK_EVENT` on Windows, is new in Robot Framework 2.8.5.
"""
process = self._processes[handle]
if not hasattr(process, 'terminate'):
raise RuntimeError('Terminating processes is not supported '
'by this Python version.')
terminator = self._kill if is_true(kill) else self._terminate
try:
terminator(process)
except OSError:
if not self._process_is_stopped(process, self.KILL_TIMEOUT):
raise
logger.debug('Ignored OSError because process was stopped.')
return self._wait(process)
def _kill(self, process):
logger.info('Forcefully killing process.')
if hasattr(os, 'killpg'):
os.killpg(process.pid, signal_module.SIGKILL)
else:
process.kill()
if not self._process_is_stopped(process, self.KILL_TIMEOUT):
raise RuntimeError('Failed to kill process.')
def _terminate(self, process):
logger.info('Gracefully terminating process.')
# Sends signal to the whole process group both on POSIX and on Windows
# if supported by the interpreter.
if hasattr(os, 'killpg'):
os.killpg(process.pid, signal_module.SIGTERM)
elif hasattr(signal_module, 'CTRL_BREAK_EVENT'):
if sys.platform == 'cli':
# https://ironpython.codeplex.com/workitem/35020
ctypes.windll.kernel32.GenerateConsoleCtrlEvent(
signal_module.CTRL_BREAK_EVENT, process.pid)
else:
process.send_signal(signal_module.CTRL_BREAK_EVENT)
else:
process.terminate()
if not self._process_is_stopped(process, self.TERMINATE_TIMEOUT):
logger.info('Graceful termination failed.')
self._kill(process)
def terminate_all_processes(self, kill=False):
"""Terminates all still running processes started by this library.
This keyword can be used in suite teardown or elsewhere to make
sure that all processes are stopped.
By default tries to terminate processes gracefully, but can be
configured to forcefully kill them immediately. See `Terminate Process`
that this keyword uses internally for more details.
"""
for handle in range(1, len(self._processes) + 1):
if self.is_process_running(handle):
self.terminate_process(handle, kill=kill)
self.__init__()
def send_signal_to_process(self, signal, handle=None, group=False):
"""Sends the given `signal` to the specified process.
If `handle` is not given, uses the current `active process`.
Signal can be specified either as an integer, as anything that can
be converted to an integer, or as a signal name. In the latter case it
is possible to give the name either with or without a `SIG` prefix,
but names are case-sensitive. For example, all the examples below
send signal `INT (2)`:
| Send Signal To Process | 2 | | # Send to active process |
| Send Signal To Process | INT | | |
| Send Signal To Process | SIGINT | myproc | # Send to named process |
What signals are supported depends on the system. For a list of
existing signals on your system, see the Unix man pages related to
signal handling (typically `man signal` or `man 7 signal`).
By default sends the signal only to the parent process, not to possible
child processes started by it. Notice that when `running processes in
shell`, the shell is the parent process and it depends on the system
whether the shell propagates the signal to the actual started process.
To send the signal to the whole process group, `group` argument can
be set to any true value:
| Send Signal To Process | TERM | group=yes |
If you are stopping a process, it is often easier and safer to use
`Terminate Process` keyword instead.
*NOTE:* Sending signals requires the
[http://docs.python.org/2/library/subprocess.html|subprocess]
module to have working `send_signal` function. It was added
in Python 2.6 and is thus missing from earlier versions.
How well it will work with forthcoming Jython 2.7 is unknown.
New in Robot Framework 2.8.2. Support for `group` argument is new
in Robot Framework 2.8.5.
"""
if os.sep == '\\':
raise RuntimeError('This keyword does not work on Windows.')
process = self._processes[handle]
signum = self._get_signal_number(signal)
logger.info('Sending signal %s (%d).' % (signal, signum))
if is_true(group) and hasattr(os, 'killpg'):
os.killpg(process.pid, signum)
elif hasattr(process, 'send_signal'):
process.send_signal(signum)
else:
raise RuntimeError('Sending signals is not supported '
'by this Python version.')
def _get_signal_number(self, int_or_name):
try:
return int(int_or_name)
except ValueError:
return self._convert_signal_name_to_number(int_or_name)
def _convert_signal_name_to_number(self, name):
try:
return getattr(signal_module,
name if name.startswith('SIG') else 'SIG' + name)
except AttributeError:
raise RuntimeError("Unsupported signal '%s'." % name)
def get_process_id(self, handle=None):
"""Returns the process ID (pid) of the process.
If `handle` is not given, uses the current `active process`.
Returns the pid assigned by the operating system as an integer.
Note that with Jython, at least with the 2.5 version, the returned
pid seems to always be `None`.
The pid is not the same as the identifier returned by
`Start Process` that is used internally by this library.
"""
return self._processes[handle].pid
def get_process_object(self, handle=None):
"""Return the underlying `subprocess.Popen` object.
If `handle` is not given, uses the current `active process`.
"""
return self._processes[handle]
def get_process_result(self, handle=None, rc=False, stdout=False,
stderr=False, stdout_path=False, stderr_path=False):
"""Returns the specified `result object` or some of its attributes.
The given `handle` specifies the process whose results should be
returned. If no `handle` is given, results of the current `active
process` are returned. In either case, the process must have
finished before this keyword can be used. In practice this means
that processes started with `Start Process` must be finished either
with `Wait For Process` or `Terminate Process` before using this
keyword.
If no other arguments than the optional `handle` are given, a whole
`result object` is returned. If one or more of the other arguments
are given any true value, only the specified attributes of the
`result object` are returned. These attributes are always returned
in the same order as arguments are specified in the keyword signature.
See `Boolean arguments` section for more details about true and false
values.
Examples:
| Run Process | python | -c | print 'Hello, world!' | alias=myproc |
| # Get result object | | |
| ${result} = | Get Process Result | myproc |
| Should Be Equal | ${result.rc} | ${0} |
| Should Be Equal | ${result.stdout} | Hello, world! |
| Should Be Empty | ${result.stderr} | |
| # Get one attribute | | |
| ${stdout} = | Get Process Result | myproc | stdout=true |
| Should Be Equal | ${stdout} | Hello, world! |
| # Multiple attributes | | |
| ${stdout} | ${stderr} = | Get Process Result | myproc | stdout=yes | stderr=yes |
| Should Be Equal | ${stdout} | Hello, world! |
| Should Be Empty | ${stderr} | |
Although getting results of a previously executed process can be handy
in general, the main use case for this keyword is returning results
over the remote library interface. The remote interface does not
support returning the whole result object, but individual attributes
can be returned without problems.
New in Robot Framework 2.8.2.
"""
result = self._results[self._processes[handle]]
if result.rc is None:
raise RuntimeError('Getting results of unfinished processes '
'is not supported.')
attributes = self._get_result_attributes(result, rc, stdout, stderr,
stdout_path, stderr_path)
if not attributes:
return result
elif len(attributes) == 1:
return attributes[0]
return attributes
def _get_result_attributes(self, result, *includes):
attributes = (result.rc, result.stdout, result.stderr,
result.stdout_path, result.stderr_path)
includes = (is_true(incl) for incl in includes)
return tuple(attr for attr, incl in zip(attributes, includes) if incl)
def switch_process(self, handle):
"""Makes the specified process the current `active process`.
The handle can be an identifier returned by `Start Process` or
the `alias` given to it explicitly.
Example:
| Start Process | prog1 | alias=process1 |
| Start Process | prog2 | alias=process2 |
| # currently active process is process2 |
| Switch Process | process1 |
| # now active process is process1 |
"""
self._processes.switch(handle)
def _process_is_stopped(self, process, timeout):
max_time = time.time() + timeout
while time.time() <= max_time:
if process.poll() is not None:
return True
time.sleep(0.1)
return False
class ExecutionResult(object):
def __init__(self, process, stdout, stderr, rc=None):
self._process = process
self.stdout_path = self._get_path(stdout)
self.stderr_path = self._get_path(stderr)
self.rc = rc
self._stdout = None
self._stderr = None
self._custom_streams = [stream for stream in (stdout, stderr)
if self._is_custom_stream(stream)]
def _get_path(self, stream):
return stream.name if self._is_custom_stream(stream) else None
def _is_custom_stream(self, stream):
return stream not in (subprocess.PIPE, subprocess.STDOUT)
@property
def stdout(self):
if self._stdout is None:
self._read_stdout()
return self._stdout
@property
def stderr(self):
if self._stderr is None:
self._read_stderr()
return self._stderr
def _read_stdout(self):
self._stdout = self._read_stream(self.stdout_path, self._process.stdout)
def _read_stderr(self):
self._stderr = self._read_stream(self.stderr_path, self._process.stderr)
def _read_stream(self, stream_path, stream):
if stream_path:
stream = open(stream_path, 'r')
elif not self._is_open(stream):
return ''
try:
return self._format_output(stream.read())
finally:
if stream_path:
stream.close()
def _is_open(self, stream):
return stream and not stream.closed
def _format_output(self, output):
if output.endswith('\n'):
output = output[:-1]
return decode_output(output, force=True)
def close_streams(self):
standard_streams = self._get_and_read_standard_streams(self._process)
for stream in standard_streams + self._custom_streams:
if self._is_open(stream):
stream.close()
def _get_and_read_standard_streams(self, process):
stdin, stdout, stderr = process.stdin, process.stdout, process.stderr
if stdout:
self._read_stdout()
if stderr:
self._read_stderr()
return [stdin, stdout, stderr]
def __str__(self):
return '<result object with rc %d>' % self.rc
class ProcessConfig(object):
def __init__(self, cwd=None, shell=False, stdout=None, stderr=None,
alias=None, env=None, **rest):
self.cwd = self._get_cwd(cwd)
self.stdout_stream = self._new_stream(stdout)
self.stderr_stream = self._get_stderr(stderr, stdout, self.stdout_stream)
self.shell = is_true(shell)
self.alias = alias
self.env = self._construct_env(env, rest)
def _get_cwd(self, cwd):
if cwd:
return cwd.replace('/', os.sep)
return abspath('.')
def _new_stream(self, name):
if name:
name = name.replace('/', os.sep)
return open(os.path.join(self.cwd, name), 'w')
return subprocess.PIPE
def _get_stderr(self, stderr, stdout, stdout_stream):
if stderr and stderr in ['STDOUT', stdout]:
if stdout_stream != subprocess.PIPE:
return stdout_stream
return subprocess.STDOUT
return self._new_stream(stderr)
def _construct_env(self, env, extra):
if env:
env = dict((encode_to_system(k), encode_to_system(v))
for k, v in env.items())
for key in extra:
if not key.startswith('env:'):
raise RuntimeError("Keyword argument '%s' is not supported by "
"this keyword." % key)
if env is None:
env = os.environ.copy()
env[encode_to_system(key[4:])] = encode_to_system(extra[key])
return env
@property
def full_config(self):
config = {'stdout': self.stdout_stream,
'stderr': self.stderr_stream,
'stdin': subprocess.PIPE,
'shell': self.shell,
'cwd': self.cwd,
'env': self.env,
'universal_newlines': True}
if hasattr(os, 'setsid') and not sys.platform.startswith('java'):
config['preexec_fn'] = os.setsid
if hasattr(subprocess, 'CREATE_NEW_PROCESS_GROUP'):
config['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
return config
def __str__(self):
return encode_to_system("""\
cwd = %s
stdout_stream = %s
stderr_stream = %s
shell = %r
alias = %s
env = %r""" % (self.cwd, self.stdout_stream, self.stderr_stream,
self.shell, self.alias, self.env))
def is_true(argument):
if isinstance(argument, basestring) and argument.upper() == 'FALSE':
return False
return bool(argument)
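# --- Illustrative sketch (not part of the library) ---
# A minimal demonstration, added for this document only, of how the true/false
# values documented in the `Boolean arguments` section map through the
# is_true() helper above. It assumes Python 2, matching the basestring usage
# in this module.
if __name__ == '__main__':
    for value in ('True', 'yes', 42, 'False', '', 0, False):
        print('%r -> %r' % (value, is_true(value)))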
| 43.843182
| 111
| 0.628998
|
58adfc497b54251db4d9d3004b471a7044f5020f
| 1,220
|
py
|
Python
|
main/migrations/0005_auto_20171108_0714.py
|
julian-londono/Capital_One_Optimization
|
8853e45919c1ec972b4a02c6678e0233fb8984e9
|
[
"MIT"
] | 1
|
2018-10-21T02:59:13.000Z
|
2018-10-21T02:59:13.000Z
|
main/migrations/0005_auto_20171108_0714.py
|
julian-londono/Capital_One_Optimization
|
8853e45919c1ec972b4a02c6678e0233fb8984e9
|
[
"MIT"
] | null | null | null |
main/migrations/0005_auto_20171108_0714.py
|
julian-londono/Capital_One_Optimization
|
8853e45919c1ec972b4a02c6678e0233fb8984e9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-08 07:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20171108_0659'),
]
operations = [
migrations.AlterField(
model_name='listing',
name='host_since',
field=models.DateField(default='1998-01-02'),
),
migrations.AlterField(
model_name='listing',
name='lat',
field=models.DecimalField(decimal_places=15, default=0.0, max_digits=19),
),
migrations.AlterField(
model_name='listing',
name='long',
field=models.DecimalField(decimal_places=15, default=0.0, max_digits=19),
),
migrations.AlterField(
model_name='listing',
name='price_cleaning',
field=models.DecimalField(decimal_places=3, default=0.0, max_digits=8),
),
migrations.AlterField(
model_name='listing',
name='price_night',
field=models.DecimalField(decimal_places=3, default=0.0, max_digits=8),
),
]
| 29.756098
| 85
| 0.586066
|
f811cc7e878ee34c7bc9b274997e8c2679d46bc9
| 2,050
|
py
|
Python
|
tests/test_day06.py
|
FollowTheProcess/advent_of_code_2021
|
27427bf76732f5257d369a9455dbbb337dd7a0bc
|
[
"MIT"
] | null | null | null |
tests/test_day06.py
|
FollowTheProcess/advent_of_code_2021
|
27427bf76732f5257d369a9455dbbb337dd7a0bc
|
[
"MIT"
] | null | null | null |
tests/test_day06.py
|
FollowTheProcess/advent_of_code_2021
|
27427bf76732f5257d369a9455dbbb337dd7a0bc
|
[
"MIT"
] | null | null | null |
from src.day06.day06 import State, simulate
def test_state_parse():
input = "3,4,3,1,2\n"
assert State.parse(input) == State(current=[3, 4, 3, 1, 2])
def test_get_zeros():
current = [3, 4, 0, 1, 4, 0, 3, 1, 7, 0]
# Want the indices of every 0
want = [2, 5, 9]
state = State(current=current)
assert state.get_zeros() == want
def test_next():
current = [3, 4, 3, 1, 2]
state = State(current=current)
assert state.next() == [2, 3, 2, 0, 1]
def test_advance():
current = [3, 4, 3, 1, 2]
state = State(current=current)
state.advance()
assert state.current == [2, 3, 2, 0, 1]
def test_next_series_example():
start = [3, 4, 3, 1, 2]
# The intermediate states from the example
want = [
[2, 3, 2, 0, 1],
[1, 2, 1, 6, 0, 8],
[0, 1, 0, 5, 6, 7, 8],
[6, 0, 6, 4, 5, 6, 7, 8, 8],
[5, 6, 5, 3, 4, 5, 6, 7, 7, 8],
[4, 5, 4, 2, 3, 4, 5, 6, 6, 7],
[3, 4, 3, 1, 2, 3, 4, 5, 5, 6],
[2, 3, 2, 0, 1, 2, 3, 4, 4, 5],
[1, 2, 1, 6, 0, 1, 2, 3, 3, 4, 8],
[0, 1, 0, 5, 6, 0, 1, 2, 2, 3, 7, 8],
[6, 0, 6, 4, 5, 6, 0, 1, 1, 2, 6, 7, 8, 8, 8],
[5, 6, 5, 3, 4, 5, 6, 0, 0, 1, 5, 6, 7, 7, 7, 8, 8],
[4, 5, 4, 2, 3, 4, 5, 6, 6, 0, 4, 5, 6, 6, 6, 7, 7, 8, 8],
[3, 4, 3, 1, 2, 3, 4, 5, 5, 6, 3, 4, 5, 5, 5, 6, 6, 7, 7, 8],
[2, 3, 2, 0, 1, 2, 3, 4, 4, 5, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7],
[1, 2, 1, 6, 0, 1, 2, 3, 3, 4, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 8],
[0, 1, 0, 5, 6, 0, 1, 2, 2, 3, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 7, 8],
[6, 0, 6, 4, 5, 6, 0, 1, 1, 2, 6, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 8, 8, 8],
]
# Start at the beginning, the next state should be the next
# thing in want
state = State(start)
for i, item in enumerate(want):
state.advance()
assert state.current == item
def test_simulate_example_part1():
start = [3, 4, 3, 1, 2]
assert simulate(start, 18) == 26
assert simulate(start, 80) == 5934
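# --- Illustrative sketch (not part of the test suite) ---
# The src.day06.day06 implementation under test is not included in this dump.
# As an illustration of the counting approach these expected values imply
# (26 fish after 18 days, 5934 after 80 days), a hypothetical, self-contained
# simulate could track how many timers hold each value instead of storing
# every fish individually:
from collections import Counter


def simulate_sketch(start, days):
    counts = Counter(start)
    for _ in range(days):
        spawning = counts.pop(0, 0)  # fish whose timer hit 0 this step
        counts = Counter({timer - 1: n for timer, n in counts.items()})
        counts[6] += spawning        # parents reset their timer to 6
        counts[8] += spawning        # each parent spawns a new fish at 8
    return sum(counts.values())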
| 26.973684
| 87
| 0.444878
|
feeb8fe1419a94ba252dbdc0f308012d41e52f94
| 5,380
|
py
|
Python
|
akshare/stock_feature/stock_em_analyst.py
|
szj2ys/akshare
|
d61ccbff50539bff1e07ffd15b841921b4846958
|
[
"MIT"
] | 1
|
2021-11-30T06:33:53.000Z
|
2021-11-30T06:33:53.000Z
|
akshare/stock_feature/stock_em_analyst.py
|
HustCoderHu/akshare
|
0ae3a936af2f552e1c41e2f10a0097f35d897c3b
|
[
"MIT"
] | null | null | null |
akshare/stock_feature/stock_em_analyst.py
|
HustCoderHu/akshare
|
0ae3a936af2f552e1c41e2f10a0097f35d897c3b
|
[
"MIT"
] | 1
|
2021-01-21T06:04:15.000Z
|
2021-01-21T06:04:15.000Z
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/11/15 13:39
Desc: Eastmoney - Data Center - Research Reports - Eastmoney Analyst Index
http://data.eastmoney.com/invest/invest/list.html
"""
import pandas as pd
import requests
def stock_em_analyst_rank(year: str = '2020') -> pd.DataFrame:
"""
Eastmoney - Data Center - Research Reports - Eastmoney Analyst Index - latest analyst index ranking for the given year
http://data.eastmoney.com/invest/invest/list.html
:param year: from 2015 up to the present
:type year: str
:return: latest Eastmoney analyst index ranking for the given year
:rtype: pandas.DataFrame
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
url = "http://data.eastmoney.com/dataapi/invest/data"
params = {
"st": year,
"sr": "1",
"p": "1",
"ps": "5000",
"name": "",
"type": "list",
"industrycode": "all",
"pageNo": "1",
"pageNum": "1",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
data_df = pd.DataFrame(data_json['data'])
del data_df['_id']
data_df.reset_index(inplace=True)
data_df['index'] = list(range(1, len(data_df)+1))
data_df.columns = [
"序号",
"_",
f"{year}年收益率",
"_",
"分析师名称",
"分析师单位",
"年度指数",
"3个月收益率",
"6个月收益率",
"12个月收益率",
f"{year}最新个股评级",
"_",
"_",
"分析师ID",
"_",
"成分股个数",
"_",
]
data_df = data_df[[
"序号",
"分析师名称",
"分析师单位",
"年度指数",
f"{year}年收益率",
"3个月收益率",
"6个月收益率",
"12个月收益率",
"成分股个数",
f"{year}最新个股评级",
'分析师ID',
]]
return data_df
def stock_em_analyst_detail(
analyst_id: str = "11000200926", indicator: str = "最新跟踪成分股"
) -> pd.DataFrame:
"""
Eastmoney - Data Center - Research Reports - Eastmoney Analyst Index - analyst detail page
http://data.eastmoney.com/invest/invest/11000200926.html
:param analyst_id: analyst ID, obtained from stock_em_analyst_rank
:type analyst_id: str
:param indicator: one of ["最新跟踪成分股", "历史跟踪成分股", "历史指数"] (latest tracked stocks, historical tracked stocks, historical index)
:type indicator: str
:return: data for the chosen indicator
:rtype: pandas.DataFrame
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
url = "http://data.eastmoney.com/dataapi/invest/other"
if indicator == "最新跟踪成分股":
params = {
'href': '/api/Zgfxzs/json/AnalysisIndexNew.aspx',
'paramsstr': f'index=1&size=100&code={analyst_id}',
}
r = requests.get(url, params=params, headers=headers)
json_data = r.json()
temp_df = pd.DataFrame(json_data['re'])
temp_df.reset_index(inplace=True)
temp_df['index'] = list(range(1, len(temp_df)+1))
temp_df.columns = [
'序号',
'股票代码',
'股票名称',
'调入日期',
'当前评级名称',
'成交价格(前复权)',
'最新价格',
'最新评级日期',
"_",
'阶段涨跌幅',
]
temp_df = temp_df[[
'序号',
'股票代码',
'股票名称',
'调入日期',
'最新评级日期',
'当前评级名称',
'成交价格(前复权)',
'最新价格',
'阶段涨跌幅',
]]
return temp_df
elif indicator == "历史跟踪成分股":
params = {
'href': '/api/Zgfxzs/json/AnalysisIndexls.aspx',
'paramsstr': f'index=1&size=100&code={analyst_id}',
}
r = requests.get(url, params=params, headers=headers)
json_data = r.json()
temp_df = pd.DataFrame(json_data['re'])
temp_df.reset_index(inplace=True)
temp_df['index'] = list(range(1, len(temp_df) + 1))
temp_df.columns = [
'序号',
'股票代码',
'股票名称',
'调入日期',
'调出日期',
'调入时评级名称',
'调出原因',
'_',
'累计涨跌幅',
]
temp_df = temp_df[[
'序号',
'股票代码',
'股票名称',
'调入日期',
'调出日期',
'调入时评级名称',
'调出原因',
'累计涨跌幅',
]]
return temp_df
elif indicator == "历史指数":
params = {
'href': '/DataCenter_V3/chart/AnalystsIndex.ashx',
'paramsstr': f'code={analyst_id}&d=&isxml=True',
}
r = requests.get(url, params=params, headers=headers)
json_data = r.json()
temp_df = pd.DataFrame(
[json_data["X"].split(","), json_data["Y"][0].split(",")],
index=["date", "value"],
).T
return temp_df
if __name__ == "__main__":
stock_em_analyst_rank_df = stock_em_analyst_rank(year='2018')
print(stock_em_analyst_rank_df)
stock_em_analyst_detail_current_stock_df = stock_em_analyst_detail(
analyst_id="11000200926", indicator="最新跟踪成分股"
)
print(stock_em_analyst_detail_current_stock_df)
stock_em_analyst_detail_history_stock_df = stock_em_analyst_detail(
analyst_id="11000200926", indicator="历史跟踪成分股"
)
print(stock_em_analyst_detail_history_stock_df)
stock_em_analyst_detail_index_df = stock_em_analyst_detail(
analyst_id="11000200926", indicator="历史指数"
)
print(stock_em_analyst_detail_index_df)
| 28.465608
| 139
| 0.521004
|
033c7e4655dbc54d78747622531988cf988af810
| 3,731
|
py
|
Python
|
plotly/tests/test_core/test_api/test_v2/test_files.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 1
|
2018-07-16T01:51:47.000Z
|
2018-07-16T01:51:47.000Z
|
plotly/tests/test_core/test_api/test_v2/test_files.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/tests/test_core/test_api/test_v2/test_files.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 1
|
2019-02-18T04:12:56.000Z
|
2019-02-18T04:12:56.000Z
|
from __future__ import absolute_import
from plotly.api.v2 import files
from plotly.tests.test_core.test_api import PlotlyApiTestCase
class FilesTest(PlotlyApiTestCase):
def setUp(self):
super(FilesTest, self).setUp()
# Mock the actual api call, we don't want to do network tests here.
self.request_mock = self.mock('plotly.api.v2.utils.requests.request')
self.request_mock.return_value = self.get_response()
# Mock the validation function since we can test that elsewhere.
self.mock('plotly.api.v2.utils.validate_response')
def test_retrieve(self):
files.retrieve('hodor:88')
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, 'get')
self.assertEqual(
url, '{}/v2/files/hodor:88'.format(self.plotly_api_domain)
)
self.assertEqual(kwargs['params'], {})
def test_retrieve_share_key(self):
files.retrieve('hodor:88', share_key='foobar')
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, 'get')
self.assertEqual(
url, '{}/v2/files/hodor:88'.format(self.plotly_api_domain)
)
self.assertEqual(kwargs['params'], {'share_key': 'foobar'})
def test_update(self):
new_filename = '..zzZ ..zzZ'
files.update('hodor:88', body={'filename': new_filename})
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, 'put')
self.assertEqual(
url, '{}/v2/files/hodor:88'.format(self.plotly_api_domain)
)
self.assertEqual(kwargs['data'],
'{{"filename": "{}"}}'.format(new_filename))
def test_trash(self):
files.trash('hodor:88')
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, 'post')
self.assertEqual(
url, '{}/v2/files/hodor:88/trash'.format(self.plotly_api_domain)
)
def test_restore(self):
files.restore('hodor:88')
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, 'post')
self.assertEqual(
url, '{}/v2/files/hodor:88/restore'.format(self.plotly_api_domain)
)
def test_permanent_delete(self):
files.permanent_delete('hodor:88')
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, 'delete')
self.assertEqual(
url,
'{}/v2/files/hodor:88/permanent_delete'
.format(self.plotly_api_domain)
)
def test_lookup(self):
# requests does urlencode, so don't worry about the `' '` character!
path = '/mah plot'
parent = 43
user = 'someone'
exists = True
files.lookup(path=path, parent=parent, user=user, exists=exists)
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
expected_params = {'path': path, 'parent': parent, 'exists': 'true',
'user': user}
self.assertEqual(method, 'get')
self.assertEqual(
url, '{}/v2/files/lookup'.format(self.plotly_api_domain)
)
self.assertEqual(kwargs['params'], expected_params)
| 35.533333
| 78
| 0.61324
|
7718dff260fec8778e57ab9a14df05e5c41f32d3
| 7,804
|
py
|
Python
|
user/user.py
|
heicat1337/BiliLiveBot
|
68474226cda7f76140911dbc1b0a608371c84110
|
[
"MIT"
] | 63
|
2020-02-12T15:41:27.000Z
|
2022-03-19T14:22:07.000Z
|
user/user.py
|
heicat1337/BiliLiveBot
|
68474226cda7f76140911dbc1b0a608371c84110
|
[
"MIT"
] | 16
|
2020-02-14T14:02:46.000Z
|
2022-01-10T11:38:00.000Z
|
user/user.py
|
heicat1337/BiliLiveBot
|
68474226cda7f76140911dbc1b0a608371c84110
|
[
"MIT"
] | 18
|
2020-02-18T04:52:33.000Z
|
2022-03-02T04:28:45.000Z
|
import asyncio
import json
from itertools import count
from typing import Callable, Optional
import conf_loader
import exceptions
import printer
from tasks.login import LoginTask
from web_session import WebSession
from .platform import AppPlatform, PcPlatform, TvPlatform
# Everything from user.toml is kept in self.dict_user, mapping one-to-one to each user
# The bili.toml data lives in self.pc, self.app and self.tv, because it is too complex to keep directly on User
class User:
_ids = count(0)
__slots__ = (
'id', 'force_sleep', 'name', 'password', 'alias', 'task_ctrl',
'task_arrangement', 'is_in_jail',
'bililive_session', 'login_session', 'other_session',
'dict_user', 'pc', 'app', 'tv', 'repost_del_lock',
'dyn_lottery_friends',
'_waiting_login', '_loop',
###
'manage_room', 'alerts', 'gift_comb_delay', 'alert_second', 'gift_thx_format', 'focus_thx_format',
'danmu_length', 'random_list_1', 'random_list_2', 'random_list_3',
'medal_update_format', 'medal_update_check_delay',
'guard_thx_format', 'fans_check_delay', 'only_live_thx',
'silver_gift_thx_format', 'gold_gift_thx_format', 'disable_gift_thx',
'reply', 'ban',
'height', 'weight',
'const_json', 'only_live_alert',
'anchor_alert_format',
)
def __init__(
self, dict_user: dict, task_ctrl: dict, task_arrangement: dict, dict_bili: dict, force_sleep: Callable):
self.id = next(self._ids)
self.force_sleep = force_sleep
self.name = dict_user['username']
self.password = dict_user['password']
self.alias = dict_user.get('alias', self.name)
self.task_ctrl = task_ctrl
self.task_arrangement = task_arrangement
self.is_in_jail = False  # whether the account is in the "little black room" (temporarily banned)
##
self.manage_room = dict_user['manage_room']
self.alerts = dict_user.get('alerts', [])
self.gift_comb_delay = dict_user['gift_comb_delay']
self.alert_second = dict_user['alert_second']
self.gift_thx_format = dict_user.get('gift_thx_format', '感谢{username}投喂的{giftname}x{num}')
self.silver_gift_thx_format = dict_user.get('silver_gift_thx_format', self.gift_thx_format)
self.gold_gift_thx_format = dict_user.get('gold_gift_thx_format', self.gift_thx_format)
self.focus_thx_format = dict_user['focus_thx_format']
self.guard_thx_format = dict_user.get('guard_thx_format', self.gift_thx_format)
self.danmu_length = dict_user.get('danmu_length', 30)
self.medal_update_format = dict_user.get('medal_update_format', '')
self.medal_update_check_delay = dict_user.get('medal_update_check_delay', 30)
self.only_live_thx = dict_user.get('only_live_thx', False)
self.only_live_alert = dict_user.get('only_live_alert', True)
self.anchor_alert_format = dict_user.get('anchor_alert_format', '')
self.reply = dict_user.get('reply', [])
self.ban = dict_user.get('ban', [])
self.height = dict_user.get('height', 0)
self.weight = dict_user.get('weight', 0)
self.disable_gift_thx = dict_user.get('disable_gift_thx', False)
if dict_user.get('const_json'):
self.const_json = json.load(open(dict_user.get('const_json'), 'r'))
else:
self.const_json = {}
self.fans_check_delay = dict_user.get('fans_check_delay', 20)
self.random_list_1 = dict_user.get('random_list_1', [])
self.random_list_2 = dict_user.get('random_list_2', [])
self.random_list_3 = dict_user.get('random_list_3', [])
if len(self.random_list_1) == 0:
self.random_list_1 = [""]
if len(self.random_list_2) == 0:
self.random_list_2 = [""]
if len(self.random_list_3) == 0:
self.random_list_3 = [""]
##
self.bililive_session = WebSession()
self.login_session = WebSession()
self.other_session = WebSession()
# Each user would otherwise share the same dict; it must be isolated (copied), or cookie updates would overwrite one another
self.pc = PcPlatform(dict_bili['pc_headers'].copy())
self.app = AppPlatform(dict_bili['app_headers'].copy(), dict_bili['app_params'])
self.tv = TvPlatform(dict_bili['tv_headers'].copy(), dict_bili['tv_params'])
self.dict_user = dict_user
self.update_login_data(dict_user)
self._waiting_login = None
self._loop = asyncio.get_event_loop()
self.repost_del_lock = asyncio.Lock()  # must stay safe across follow/unfollow (the whole repost-and-delete sequence runs under this lock)
dyn_lottery_friends = [(str(uid), name) for uid, name in task_ctrl['dyn_lottery_friends'].items()]
self.dyn_lottery_friends = dyn_lottery_friends # list (uid, name)
def update_login_data(self, login_data):
for key, value in login_data.items():
self.dict_user[key] = value
if key == 'cookie':
self.pc.update_cookie(value)
self.app.update_cookie(value)
self.tv.update_cookie(value)
conf_loader.write_user(login_data, self.id)
def is_online(self):
return self.pc.headers['cookie'] and self.app.headers['cookie'] and self.tv.headers['cookie']
def update_log(self):
conf_loader.write_user({'weight': self.weight, 'height': self.height}, self.id)
def info(
self,
*objects,
with_userid=True,
**kwargs):
if with_userid:
printer.info(
*objects,
**kwargs,
extra_info=f'用户id:{self.id} 名字:{self.alias}')
else:
printer.info(*objects, **kwargs)
def warn(self, *objects, **kwargs):
printer.warn(
*objects,
**kwargs,
extra_info=f'用户id:{self.id} 名字:{self.alias}')
def app_sign(self, extra_params: Optional[dict] = None) -> dict:
return self.app.sign(extra_params)
def tv_sign(self, extra_params: Optional[dict] = None) -> dict:
return self.tv.sign(extra_params)
async def req_s(self, func, *args, timeout=None):
while True:
if self._waiting_login is None:
try:
return await asyncio.wait_for(func(*args), timeout=timeout)
except asyncio.TimeoutError:
self.info(f'TASK {func} 请求超时,即将 CANCEL')
raise asyncio.CancelledError()
except exceptions.LogoutError: # logout
if self._waiting_login is None:  # no login-recovery task is currently running
self.info('判定出现了登陆失败,且未处理')
self._waiting_login = self._loop.create_future()
try:
await LoginTask.handle_login_status(self)
self.info('已经登陆了')
except asyncio.CancelledError:  # cancelled while logging in; still resolve waiting_login, otherwise later requests would block forever
raise
finally:
self._waiting_login.set_result(-1)
self._waiting_login = None
else:  # a login-recovery task is already running
self.info('判定出现了登陆失败,已经处理')
await self._waiting_login
except exceptions.ForbiddenError:
await asyncio.shield(self.force_sleep(3600)) # bili_sched.force_sleep
await asyncio.sleep(3600)  # some functions are not governed by the scheduler, so just sleep here; not cancelling to avoid deadlocking other coroutines
else:
await self._waiting_login
def fall_in_jail(self):
self.is_in_jail = True
self.info(f'用户进入小黑屋')
def out_of_jail(self):
self.is_in_jail = False
self.info(f'抽奖脚本尝试性设置用户已出小黑屋(如果实际没出还会再判定进去)')
def print_status(self):
jail_status = '恭喜中奖' if self.is_in_jail else '自由之身'
self.info('当前用户的状态:', jail_status)
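# --- Illustrative sketch (not part of BiliLiveBot) ---
# req_s() above implements a "single-flight" recovery pattern: the first
# coroutine that hits exceptions.LogoutError creates a future and performs
# the re-login, while every other coroutine awaits that future instead of
# retrying the login itself. A minimal, generic version of the same idea,
# reusing the asyncio import at the top of this file (the names below are
# hypothetical, not from this project):


class SingleFlightRecovery:
    def __init__(self, recover_coro):
        self._recover_coro = recover_coro  # async callable doing the actual recovery
        self._pending = None               # future shared by waiting coroutines

    async def recover(self):
        if self._pending is None:
            # First caller performs the recovery and resolves the shared future.
            self._pending = asyncio.get_event_loop().create_future()
            try:
                await self._recover_coro()
            finally:
                self._pending.set_result(None)
                self._pending = None
        else:
            # Later callers just wait for the in-flight recovery to finish.
            await self._pending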
| 41.073684
| 116
| 0.614172
|
c1934b07b518d424b37a7a052d684195b42dd3bd
| 608
|
py
|
Python
|
momoko/__init__.py
|
Tsumanga-Studios/momoko
|
d967fc17251d013680aea648a36a4f4914226ba1
|
[
"MIT"
] | null | null | null |
momoko/__init__.py
|
Tsumanga-Studios/momoko
|
d967fc17251d013680aea648a36a4f4914226ba1
|
[
"MIT"
] | null | null | null |
momoko/__init__.py
|
Tsumanga-Studios/momoko
|
d967fc17251d013680aea648a36a4f4914226ba1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
momoko
======
Momoko wraps Psycopg2's functionality for use in Tornado.
Copyright 2011-2013 by Frank Smit.
MIT, see LICENSE for more details.
"""
import psycopg2
from psycopg2 import ProgrammingError
from .connection import Pool, Connection
from .exceptions import PoolError
from .utils import Op, WaitOp, WaitAllOps
try:
psycopg2.extensions.POLL_OK
except AttributeError:
import warnings
warnings.warn(RuntimeWarning(
'Psycopg2 does not have support for asynchronous connections. '
'You need at least version 2.2.0 of Psycopg2 to use Momoko.'))
| 22.518519
| 71
| 0.736842
|
9b932d4bc85bf4256a9cbecc55e9a829188aad4f
| 4,741
|
py
|
Python
|
kalliope/neurons/neurotimer/neurotimer.py
|
joshuaboniface/kalliope
|
0e040be3165e838485d1e5addc4d2c5df12bfd84
|
[
"MIT"
] | 1
|
2020-03-30T15:03:19.000Z
|
2020-03-30T15:03:19.000Z
|
kalliope/neurons/neurotimer/neurotimer.py
|
joshuaboniface/kalliope
|
0e040be3165e838485d1e5addc4d2c5df12bfd84
|
[
"MIT"
] | null | null | null |
kalliope/neurons/neurotimer/neurotimer.py
|
joshuaboniface/kalliope
|
0e040be3165e838485d1e5addc4d2c5df12bfd84
|
[
"MIT"
] | null | null | null |
import logging
import sys
import threading
import time
from kalliope.core import NeuronModule
from kalliope.core.NeuronModule import MissingParameterException, InvalidParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class TimerThread(threading.Thread):
def __init__(self, time_to_wait_seconds, callback):
"""
A Thread that will call the given callback method after waiting time_to_wait_seconds
:param time_to_wait_seconds: number of seconds to wait before calling the callback method
:param callback: callback method
"""
threading.Thread.__init__(self)
self.time_to_wait_seconds = time_to_wait_seconds
self.callback = callback
def run(self):
# wait the amount of seconds
logger.debug("[Neurotimer] wait %s seconds" % self.time_to_wait_seconds)
time.sleep(self.time_to_wait_seconds)
# then run the callback method
self.callback()
class Neurotimer(NeuronModule):
def __init__(self, **kwargs):
super(Neurotimer, self).__init__(**kwargs)
# get parameters
self.seconds = kwargs.get('seconds', None)
self.minutes = kwargs.get('minutes', None)
self.hours = kwargs.get('hours', None)
self.synapse = kwargs.get('synapse', None)
self.forwarded_parameter = kwargs.get('forwarded_parameters', None)
# do some check
if self._is_parameters_ok():
# make the sum of all time parameter in seconds
retarding_time_seconds = self._get_retarding_time_seconds()
# now wait before running the target synapse
ds = TimerThread(time_to_wait_seconds=retarding_time_seconds, callback=self.callback_run_synapse)
# ds.daemon = True
ds.start()
def _is_parameters_ok(self):
"""
Check given neuron parameters are valid
:return: True if the neuron has been well configured
"""
# at least one time parameter must be set
if self.seconds is None and self.minutes is None and self.hours is None:
raise MissingParameterException("Neurotimer must have at least one time "
"parameter: seconds, minutes, hours")
self.seconds = self.get_integer_time_parameter(self.seconds)
self.minutes = self.get_integer_time_parameter(self.minutes)
self.hours = self.get_integer_time_parameter(self.hours)
if self.synapse is None:
raise MissingParameterException("Neurotimer must have a synapse name parameter")
return True
@staticmethod
def get_integer_time_parameter(time_parameter):
"""
Check if a given time parameter is a valid integer:
- must be > 0
- if not an integer, must be convertible to an integer
:param time_parameter: string or integer
:return: integer
"""
if time_parameter is not None:
if not isinstance(time_parameter, int):
# try to convert into integer
try:
time_parameter = int(time_parameter)
except ValueError:
raise InvalidParameterException("[Neurotimer] %s is not a valid integer" % time_parameter)
# check if positive
if time_parameter < 0:
raise InvalidParameterException("[Neurotimer] %s must be > 0" % time_parameter)
return time_parameter
def _get_retarding_time_seconds(self):
"""
Return the sum of given time parameters
seconds + minutes + hours
:return: integer, number of total seconds
"""
returned_time = 0
if self.seconds is not None:
returned_time += self.seconds
if self.minutes is not None:
returned_time += self.minutes * 60
if self.hours is not None:
returned_time += self.hours * 3600
logger.debug("[Neurotimer] get_retarding_time_seconds: %s" % returned_time)
return returned_time
def callback_run_synapse(self):
"""
Callback method which will be started by the timer thread once the time is over
:return:
"""
logger.debug("[Neurotimer] waiting time is over, start the synapse %s" % self.synapse)
# trick to remove unicode problem when loading jinja template with non ascii char
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
self.run_synapse_by_name(synapse_name=self.synapse,
high_priority=False,
overriding_parameter_dict=self.forwarded_parameter)
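# --- Illustrative sketch (not part of the neuron) ---
# TimerThread above does not depend on the rest of Kalliope, so it can be
# exercised on its own. A minimal, hypothetical example (it only runs when
# the module is executed directly, and still requires kalliope to be
# importable because of the imports at the top of this file):
if __name__ == "__main__":
    def say_done():
        print("timer fired")

    demo_timer = TimerThread(time_to_wait_seconds=2, callback=say_done)
    demo_timer.start()
    demo_timer.join()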
| 37.330709
| 110
| 0.639317
|
ba43a057ca3a6106ffb0b3b2ed9378abfe24ab3c
| 16,551
|
py
|
Python
|
homeassistant/helpers/config_entry_oauth2_flow.py
|
jamesmyatt/core
|
12da814470b7b4bb13b4d18b098aa4938c8a34ac
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/helpers/config_entry_oauth2_flow.py
|
jamesmyatt/core
|
12da814470b7b4bb13b4d18b098aa4938c8a34ac
|
[
"Apache-2.0"
] | 4
|
2022-03-01T06:32:53.000Z
|
2022-03-31T06:30:35.000Z
|
homeassistant/helpers/config_entry_oauth2_flow.py
|
jamesmyatt/core
|
12da814470b7b4bb13b4d18b098aa4938c8a34ac
|
[
"Apache-2.0"
] | null | null | null |
"""Config Flow using OAuth2.
This module consists of the following parts:
- OAuth2 config flow which supports multiple OAuth2 implementations
- OAuth2 implementation that works with local provided client ID/secret
"""
from abc import ABC, ABCMeta, abstractmethod
import asyncio
import logging
import secrets
import time
from typing import Any, Awaitable, Callable, Dict, Optional, cast
from aiohttp import client, web
import async_timeout
import jwt
import voluptuous as vol
from yarl import URL
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.network import NoURLAvailableError, get_url
from .aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
DATA_JWT_SECRET = "oauth2_jwt_secret"
DATA_VIEW_REGISTERED = "oauth2_view_reg"
DATA_IMPLEMENTATIONS = "oauth2_impl"
DATA_PROVIDERS = "oauth2_providers"
AUTH_CALLBACK_PATH = "/auth/external/callback"
CLOCK_OUT_OF_SYNC_MAX_SEC = 20
class AbstractOAuth2Implementation(ABC):
"""Base class to abstract OAuth2 authentication."""
@property
@abstractmethod
def name(self) -> str:
"""Name of the implementation."""
@property
@abstractmethod
def domain(self) -> str:
"""Domain that is providing the implementation."""
@abstractmethod
async def async_generate_authorize_url(self, flow_id: str) -> str:
"""Generate a url for the user to authorize.
This step is called when a config flow is initialized. It should redirect the
user to the vendor website where they can authorize Home Assistant.
The implementation is responsible for getting notified when the user is authorized
and passing this to the specified config flow. Do as little work as possible once
notified. You can do the work inside async_resolve_external_data. This will
give the best UX.
Pass external data in with:
await hass.config_entries.flow.async_configure(
flow_id=flow_id, user_input=external_data
)
"""
@abstractmethod
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Resolve external data to tokens.
Turn the data that the implementation passed to the config flow as external
step data into tokens. These tokens will be stored as 'token' in the
config entry data.
"""
async def async_refresh_token(self, token: dict) -> dict:
"""Refresh a token and update expires info."""
new_token = await self._async_refresh_token(token)
# Force int for non-compliant oauth2 providers
new_token["expires_in"] = int(new_token["expires_in"])
new_token["expires_at"] = time.time() + new_token["expires_in"]
return new_token
@abstractmethod
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh a token."""
class LocalOAuth2Implementation(AbstractOAuth2Implementation):
"""Local OAuth2 implementation."""
def __init__(
self,
hass: HomeAssistant,
domain: str,
client_id: str,
client_secret: str,
authorize_url: str,
token_url: str,
):
"""Initialize local auth implementation."""
self.hass = hass
self._domain = domain
self.client_id = client_id
self.client_secret = client_secret
self.authorize_url = authorize_url
self.token_url = token_url
@property
def name(self) -> str:
"""Name of the implementation."""
return "Configuration.yaml"
@property
def domain(self) -> str:
"""Domain providing the implementation."""
return self._domain
@property
def redirect_uri(self) -> str:
"""Return the redirect uri."""
return f"{get_url(self.hass, require_current_request=True)}{AUTH_CALLBACK_PATH}"
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {}
async def async_generate_authorize_url(self, flow_id: str) -> str:
"""Generate a url for the user to authorize."""
return str(
URL(self.authorize_url)
.with_query(
{
"response_type": "code",
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"state": _encode_jwt(self.hass, {"flow_id": flow_id}),
}
)
.update_query(self.extra_authorize_data)
)
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Resolve the authorization code to tokens."""
return await self._token_request(
{
"grant_type": "authorization_code",
"code": external_data,
"redirect_uri": self.redirect_uri,
}
)
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh tokens."""
new_token = await self._token_request(
{
"grant_type": "refresh_token",
"client_id": self.client_id,
"refresh_token": token["refresh_token"],
}
)
return {**token, **new_token}
async def _token_request(self, data: dict) -> dict:
"""Make a token request."""
session = async_get_clientsession(self.hass)
data["client_id"] = self.client_id
if self.client_secret is not None:
data["client_secret"] = self.client_secret
resp = await session.post(self.token_url, data=data)
resp.raise_for_status()
return cast(dict, await resp.json())
class AbstractOAuth2FlowHandler(config_entries.ConfigFlow, metaclass=ABCMeta):
"""Handle a config flow."""
DOMAIN = ""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN
def __init__(self) -> None:
"""Instantiate config flow."""
if self.DOMAIN == "":
raise TypeError(
f"Can't instantiate class {self.__class__.__name__} without DOMAIN being set"
)
self.external_data: Any = None
self.flow_impl: AbstractOAuth2Implementation = None # type: ignore
@property
@abstractmethod
def logger(self) -> logging.Logger:
"""Return logger."""
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {}
async def async_step_pick_implementation(
self, user_input: Optional[dict] = None
) -> dict:
"""Handle a flow start."""
assert self.hass
implementations = await async_get_implementations(self.hass, self.DOMAIN)
if user_input is not None:
self.flow_impl = implementations[user_input["implementation"]]
return await self.async_step_auth()
if not implementations:
return self.async_abort(reason="missing_configuration")
if len(implementations) == 1:
# Pick first implementation as we have only one.
self.flow_impl = list(implementations.values())[0]
return await self.async_step_auth()
return self.async_show_form(
step_id="pick_implementation",
data_schema=vol.Schema(
{
vol.Required(
"implementation", default=list(implementations.keys())[0]
): vol.In({key: impl.name for key, impl in implementations.items()})
}
),
)
async def async_step_auth(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Create an entry for auth."""
# Flow has been triggered by external data
if user_input:
self.external_data = user_input
return self.async_external_step_done(next_step_id="creation")
try:
with async_timeout.timeout(10):
url = await self.flow_impl.async_generate_authorize_url(self.flow_id)
except asyncio.TimeoutError:
return self.async_abort(reason="authorize_url_timeout")
except NoURLAvailableError:
return self.async_abort(
reason="no_url_available",
description_placeholders={
"docs_url": "https://www.home-assistant.io/more-info/no-url-available"
},
)
url = str(URL(url).update_query(self.extra_authorize_data))
return self.async_external_step(step_id="auth", url=url)
async def async_step_creation(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Create config entry from external data."""
token = await self.flow_impl.async_resolve_external_data(self.external_data)
# Force int for non-compliant oauth2 providers
try:
token["expires_in"] = int(token["expires_in"])
except ValueError as err:
_LOGGER.warning("Error converting expires_in to int: %s", err)
return self.async_abort(reason="oauth_error")
token["expires_at"] = time.time() + token["expires_in"]
self.logger.info("Successfully authenticated")
return await self.async_oauth_create_entry(
{"auth_implementation": self.flow_impl.domain, "token": token}
)
async def async_oauth_create_entry(self, data: dict) -> dict:
"""Create an entry for the flow.
Ok to override if you want to fetch extra info or even add another step.
"""
return self.async_create_entry(title=self.flow_impl.name, data=data)
async def async_step_discovery(
self, discovery_info: Dict[str, Any]
) -> Dict[str, Any]:
"""Handle a flow initialized by discovery."""
await self.async_set_unique_id(self.DOMAIN)
assert self.hass is not None
if self.hass.config_entries.async_entries(self.DOMAIN):
return self.async_abort(reason="already_configured")
return await self.async_step_pick_implementation()
async_step_user = async_step_pick_implementation
async_step_mqtt = async_step_discovery
async_step_ssdp = async_step_discovery
async_step_zeroconf = async_step_discovery
async_step_homekit = async_step_discovery
@classmethod
def async_register_implementation(
cls, hass: HomeAssistant, local_impl: LocalOAuth2Implementation
) -> None:
"""Register a local implementation."""
async_register_implementation(hass, cls.DOMAIN, local_impl)
@callback
def async_register_implementation(
hass: HomeAssistant, domain: str, implementation: AbstractOAuth2Implementation
) -> None:
"""Register an OAuth2 flow implementation for an integration."""
if isinstance(implementation, LocalOAuth2Implementation) and not hass.data.get(
DATA_VIEW_REGISTERED, False
):
hass.http.register_view(OAuth2AuthorizeCallbackView()) # type: ignore
hass.data[DATA_VIEW_REGISTERED] = True
implementations = hass.data.setdefault(DATA_IMPLEMENTATIONS, {})
implementations.setdefault(domain, {})[implementation.domain] = implementation
async def async_get_implementations(
hass: HomeAssistant, domain: str
) -> Dict[str, AbstractOAuth2Implementation]:
"""Return OAuth2 implementations for specified domain."""
registered = cast(
Dict[str, AbstractOAuth2Implementation],
hass.data.setdefault(DATA_IMPLEMENTATIONS, {}).get(domain, {}),
)
if DATA_PROVIDERS not in hass.data:
return registered
registered = dict(registered)
for provider_domain, get_impl in hass.data[DATA_PROVIDERS].items():
implementation = await get_impl(hass, domain)
if implementation is not None:
registered[provider_domain] = implementation
return registered
async def async_get_config_entry_implementation(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> AbstractOAuth2Implementation:
"""Return the implementation for this config entry."""
implementations = await async_get_implementations(hass, config_entry.domain)
implementation = implementations.get(config_entry.data["auth_implementation"])
if implementation is None:
raise ValueError("Implementation not available")
return implementation
@callback
def async_add_implementation_provider(
hass: HomeAssistant,
provider_domain: str,
async_provide_implementation: Callable[
[HomeAssistant, str], Awaitable[Optional[AbstractOAuth2Implementation]]
],
) -> None:
"""Add an implementation provider.
If no implementation found, return None.
"""
hass.data.setdefault(DATA_PROVIDERS, {})[
provider_domain
] = async_provide_implementation
class OAuth2AuthorizeCallbackView(HomeAssistantView):
"""OAuth2 Authorization Callback View."""
requires_auth = False
url = AUTH_CALLBACK_PATH
name = "auth:external:callback"
async def get(self, request: web.Request) -> web.Response:
"""Receive authorization code."""
if "code" not in request.query or "state" not in request.query:
return web.Response(
text=f"Missing code or state parameter in {request.url}"
)
hass = request.app["hass"]
state = _decode_jwt(hass, request.query["state"])
if state is None:
return web.Response(text="Invalid state")
await hass.config_entries.flow.async_configure(
flow_id=state["flow_id"], user_input=request.query["code"]
)
return web.Response(
headers={"content-type": "text/html"},
text="<script>window.close()</script>",
)
class OAuth2Session:
"""Session to make requests authenticated with OAuth2."""
def __init__(
self,
hass: HomeAssistant,
config_entry: config_entries.ConfigEntry,
implementation: AbstractOAuth2Implementation,
):
"""Initialize an OAuth2 session."""
self.hass = hass
self.config_entry = config_entry
self.implementation = implementation
@property
def token(self) -> dict:
"""Return the token."""
return cast(dict, self.config_entry.data["token"])
@property
def valid_token(self) -> bool:
"""Return if token is still valid."""
return (
cast(float, self.token["expires_at"])
> time.time() + CLOCK_OUT_OF_SYNC_MAX_SEC
)
async def async_ensure_token_valid(self) -> None:
"""Ensure that the current token is valid."""
if self.valid_token:
return
new_token = await self.implementation.async_refresh_token(self.token)
self.hass.config_entries.async_update_entry(
self.config_entry, data={**self.config_entry.data, "token": new_token}
)
async def async_request(
self, method: str, url: str, **kwargs: Any
) -> client.ClientResponse:
"""Make a request."""
await self.async_ensure_token_valid()
return await async_oauth2_request(
self.hass, self.config_entry.data["token"], method, url, **kwargs
)
async def async_oauth2_request(
hass: HomeAssistant, token: dict, method: str, url: str, **kwargs: Any
) -> client.ClientResponse:
"""Make an OAuth2 authenticated request.
This method will not refresh tokens. Use OAuth2 session for that.
"""
session = async_get_clientsession(hass)
return await session.request(
method,
url,
**kwargs,
headers={
**(kwargs.get("headers") or {}),
"authorization": f"Bearer {token['access_token']}",
},
)
@callback
def _encode_jwt(hass: HomeAssistant, data: dict) -> str:
"""JWT encode data."""
secret = hass.data.get(DATA_JWT_SECRET)
if secret is None:
secret = hass.data[DATA_JWT_SECRET] = secrets.token_hex()
return jwt.encode(data, secret, algorithm="HS256").decode()
@callback
def _decode_jwt(hass: HomeAssistant, encoded: str) -> Optional[dict]:
"""JWT encode data."""
secret = cast(str, hass.data.get(DATA_JWT_SECRET))
try:
return jwt.decode(encoded, secret, algorithms=["HS256"])
except jwt.InvalidTokenError:
return None
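# --- Illustrative usage sketch (not part of the module above) ---
# A minimal example of how an integration might call the session helper. The hass
# object, config entry and URL are placeholders, and the entry is assumed to already
# hold "auth_implementation" and "token" in its data.
async def async_fetch_profile(hass, entry):
    implementation = await async_get_config_entry_implementation(hass, entry)
    session = OAuth2Session(hass, entry, implementation)
    # async_request refreshes the token first when it is close to expiring
    resp = await session.async_request("get", "https://example.invalid/api/profile")
    return await resp.json()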
| 32.709486
| 93
| 0.650595
|
9f142ad07eac065622312206def240afbc96537b
| 1,682
|
py
|
Python
|
thermo/management/commands/get_weather.py
|
tprzybylek/RPi_thermometer
|
4dc59985accdd5838b11877f128e0cbe80af79ca
|
[
"MIT"
] | null | null | null |
thermo/management/commands/get_weather.py
|
tprzybylek/RPi_thermometer
|
4dc59985accdd5838b11877f128e0cbe80af79ca
|
[
"MIT"
] | 4
|
2021-03-30T13:01:41.000Z
|
2021-09-22T18:48:51.000Z
|
thermo/management/commands/get_weather.py
|
tprzybylek/RPi_thermometer
|
4dc59985accdd5838b11877f128e0cbe80af79ca
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from thermo.models import Record
from datetime import datetime, timedelta
from django.db.models import Q
import pytz
import requests
class Command(BaseCommand):
help = 'Retrieves weather data from OpenWeatherMap'
def handle(self, *args, **options):
def round_datetime(t):
dt = timedelta(hours=t.minute // 30)
t = t.replace(second=0, microsecond=0, minute=0)
t = t + dt
return t
def create_record():
            response = requests.get(
                request_url,
                params={'id': city_id, 'appid': api_key, 'units': 'metric', 'lang': 'pl'},
            ).json()
utc = pytz.utc
ts = response['dt']
ts = round_datetime(utc.localize(datetime.utcfromtimestamp(ts)))
defaults = {
'outdoor_temperature': response['main']['temp'],
'outdoor_humidity': response['main']['humidity'],
}
Record.objects.update_or_create(
date=ts,
defaults=defaults
)
api_key = 'df2fbf0c442d4a6319693cb6e72cdb49'
city_id = '3081368'
request_url = 'http://api.openweathermap.org/data/2.5/weather'
last_record = Record.objects.filter(Q(outdoor_temperature__isnull=False)
| Q(outdoor_humidity__isnull=False)).order_by('date').last()
if last_record:
time_diff = datetime.now(pytz.utc) - last_record.date
if time_diff.total_seconds() / 3600 > 1:
create_record()
else:
create_record()
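# --- Behaviour sketch (illustrative only, not part of the command above) ---
# round_datetime snaps a timestamp to the nearest full hour, which is why every
# Record is keyed on a whole-hour date. The helper is redefined here verbatim so
# the example runs on its own.
from datetime import datetime, timedelta

def _round_datetime(t):
    dt = timedelta(hours=t.minute // 30)
    t = t.replace(second=0, microsecond=0, minute=0)
    return t + dt

assert _round_datetime(datetime(2021, 3, 30, 13, 29)) == datetime(2021, 3, 30, 13, 0)
assert _round_datetime(datetime(2021, 3, 30, 13, 31)) == datetime(2021, 3, 30, 14, 0)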
| 32.346154
| 109
| 0.56956
|
57120b4d1a1c73d193b5dc7985fd75b60044bf58
| 2,251
|
py
|
Python
|
forms/forms110.py
|
VoprosiKira/l2
|
49f3624b4b2d25329a8e3aa166b03735cf759c37
|
[
"MIT"
] | null | null | null |
forms/forms110.py
|
VoprosiKira/l2
|
49f3624b4b2d25329a8e3aa166b03735cf759c37
|
[
"MIT"
] | null | null | null |
forms/forms110.py
|
VoprosiKira/l2
|
49f3624b4b2d25329a8e3aa166b03735cf759c37
|
[
"MIT"
] | null | null | null |
from reportlab.lib.pagesizes import A4
from forms.sql_func import get_extra_notification_data_for_pdf
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
from reportlab.lib.units import mm
from reportlab.lib.enums import TA_JUSTIFY
from forms.views import get_epid_data
from laboratory.settings import FONTS_FOLDER
import os.path
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
import simplejson as json
from io import BytesIO
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak
from laboratory.settings import EXTRA_MASTER_RESEARCH_PK, EXTRA_SLAVE_RESEARCH_PK
from utils.dates import normalize_date
def form_01(request_data):
    # Result: emergency (epidemiological) notifications
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifReg"
style.fontSize = 9
style.alignment = TA_JUSTIFY
directions = [x for x in json.loads(request_data["pk"]) if x is not None]
buffer = BytesIO()
doc = SimpleDocTemplate(buffer, pagesize=A4, leftMargin=25 * mm, rightMargin=5 * mm, topMargin=6 * mm, bottomMargin=6 * mm, allowSplitting=1, title="Форма {}".format("Эпид. извещение"))
data_result = get_epid_data(directions, 1)
objs = []
for k, v in data_result.items():
opinion = [
[Paragraph('Эпид Номер', style), Paragraph(v.get('epid_value'), style)],
]
for i in v.get('master_field_results'):
opinion.append(
[Paragraph(i.get('master_field_title'), style), Paragraph(i.get('master_value'), style)],
)
tbl = Table(opinion, colWidths=(60 * mm, 120 * mm))
tbl.setStyle(
TableStyle(
[
('GRID', (0, 0), (-1, -1), 1.0, colors.black),
('BOTTOMPADDING', (0, 0), (-1, -1), 0.1 * mm),
]
)
)
objs.append(Spacer(1, 3 * mm))
objs.append(tbl)
objs.append(PageBreak())
doc.build(objs)
pdf = buffer.getvalue()
buffer.close()
return pdf
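# --- Minimal standalone sketch of the reportlab pattern used above ---
# Arbitrary content and column widths; it builds a one-table PDF in memory the
# same way form_01 assembles its grids.
from io import BytesIO
from reportlab.lib.pagesizes import A4
from reportlab.lib import colors
from reportlab.lib.units import mm
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle

buf = BytesIO()
doc = SimpleDocTemplate(buf, pagesize=A4)
tbl = Table([['Field', 'Value'], ['Epid number', '123']], colWidths=(60 * mm, 120 * mm))
tbl.setStyle(TableStyle([('GRID', (0, 0), (-1, -1), 1.0, colors.black)]))
doc.build([tbl])
pdf_bytes = buf.getvalue()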
| 34.106061
| 189
| 0.668147
|
6dcb66d99a2889da896cc1bd9d6a9710e68cf030
| 27,185
|
py
|
Python
|
sidpy/hdf/dtype_utils.py
|
ondrejdyck/sidpy
|
779034440b8233e1dae609a58a64ce2d25ca41c0
|
[
"MIT"
] | 5
|
2020-10-07T14:34:32.000Z
|
2021-11-17T11:25:06.000Z
|
sidpy/hdf/dtype_utils.py
|
ondrejdyck/sidpy
|
779034440b8233e1dae609a58a64ce2d25ca41c0
|
[
"MIT"
] | 94
|
2020-07-31T17:34:23.000Z
|
2022-02-11T21:57:09.000Z
|
sidpy/hdf/dtype_utils.py
|
ondrejdyck/sidpy
|
779034440b8233e1dae609a58a64ce2d25ca41c0
|
[
"MIT"
] | 15
|
2020-08-16T14:22:47.000Z
|
2021-08-20T18:15:37.000Z
|
# -*- coding: utf-8 -*-
"""
Utilities for transforming and validating data types
Given that many of the data transformations involve copying the data, they should
ideally happen in a lazy manner to avoid memory issues.
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, absolute_import, unicode_literals, print_function
import sys
from warnings import warn
import h5py
import numpy as np
import dask.array as da
__all__ = ['flatten_complex_to_real', 'get_compound_sub_dtypes', 'flatten_compound_to_real', 'check_dtype',
'stack_real_to_complex', 'validate_dtype', 'is_complex_dtype',
'stack_real_to_compound', 'stack_real_to_target_dtype', 'flatten_to_real']
from sidpy.hdf.hdf_utils import lazy_load_array
if sys.version_info.major == 3:
unicode = str
def flatten_complex_to_real(dataset, lazy=False):
"""
Stacks the real values followed by the imaginary values in the last dimension of the given N dimensional matrix.
Thus a complex matrix of shape (2, 3, 5) will turn into a matrix of shape (2, 3, 10)
Parameters
----------
dataset : array-like or :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Dataset of complex data type
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
-------
retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
real valued dataset
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> length = 3
>>> complex_array = np.random.randint(-5, high=5, size=length) + 1j * np.random.randint(-5, high=5, size=length)
>>> print('Complex value: {} has shape: {}'.format(complex_array, complex_array.shape))
Complex value: [2.-2.j 0.-3.j 0.-4.j] has shape: (3,)
>>> stacked_real_array = sidpy.dtype_utils.flatten_complex_to_real(complex_array)
>>> print('Stacked real value: {} has shape: '
>>> '{}'.format(stacked_real_array, stacked_real_array.shape))
Stacked real value: [ 2. 0. 0. -2. -3. -4.] has shape: (6,)
"""
if not isinstance(dataset, (h5py.Dataset, np.ndarray, da.core.Array)):
raise TypeError('dataset should either be a h5py.Dataset or numpy / dask array')
if not is_complex_dtype(dataset.dtype):
raise TypeError("Expected a complex valued dataset")
if isinstance(dataset, da.core.Array):
lazy = True
xp = np
if lazy:
dataset = lazy_load_array(dataset)
xp = da
axis = xp.array(dataset).ndim - 1
if axis == -1:
return xp.hstack([xp.real(dataset), xp.imag(dataset)])
else: # along the last axis
return xp.concatenate([xp.real(dataset), xp.imag(dataset)], axis=axis)
def flatten_compound_to_real(dataset, lazy=False):
"""
Flattens the individual components in a structured array or compound valued hdf5 dataset along the last axis to form
a real valued array. Thus a compound h5py.Dataset or structured numpy matrix of shape (2, 3, 5) having 3 components
will turn into a real valued matrix of shape (2, 3, 15), assuming that all the sub-dtypes of the matrix are real
valued. ie - this function does not handle structured dtypes having complex values
Parameters
----------
dataset : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Numpy array that is a structured array or a :class:`h5py.Dataset` of compound dtype
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
-------
retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
real valued dataset
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> num_elems = 5
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> structured_array = np.zeros(shape=num_elems, dtype=struct_dtype)
>>> structured_array['r'] = np.random.random(size=num_elems) * 1024
>>> structured_array['g'] = np.random.randint(0, high=1024, size=num_elems)
>>> structured_array['b'] = np.random.random(size=num_elems) * 1024
    >>> print('Structured array is of shape {} and has values:'.format(structured_array.shape))
>>> print(structured_array)
    Structured array is of shape (5,) and has values:
[(859.62445, 54, 1012.22256219) (959.5565 , 678, 296.19788769)
(383.20737, 689, 192.45427816) (201.56635, 889, 939.01082338)
(334.22015, 467, 980.9081472 )]
>>> real_array = sidpy.dtype_utils.flatten_compound_to_real(structured_array)
>>> print("This array converted to regular scalar matrix has shape: {} and values:".format(real_array.shape))
>>> print(real_array)
This array converted to regular scalar matrix has shape: (15,) and values:
[ 859.62445068 959.55651855 383.20736694 201.56634521 334.22015381
54. 678. 689. 889. 467.
1012.22256219 296.19788769 192.45427816 939.01082338 980.9081472 ]
"""
if isinstance(dataset, h5py.Dataset):
if len(dataset.dtype) == 0:
raise TypeError("Expected compound h5py dataset")
if lazy:
xp = da
dataset = lazy_load_array(dataset)
else:
xp = np
warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')
return xp.concatenate([xp.array(dataset[name]) for name in dataset.dtype.names], axis=len(dataset.shape) - 1)
elif isinstance(dataset, (np.ndarray, da.core.Array)):
if isinstance(dataset, da.core.Array):
lazy = True
xp = np
if lazy:
dataset = lazy_load_array(dataset)
xp = da
if len(dataset.dtype) == 0:
raise TypeError("Expected structured array")
if dataset.ndim > 0:
return xp.concatenate([dataset[name] for name in dataset.dtype.names], axis=dataset.ndim - 1)
else:
return xp.hstack([dataset[name] for name in dataset.dtype.names])
elif isinstance(dataset, np.void):
return np.hstack([dataset[name] for name in dataset.dtype.names])
else:
raise TypeError('Datatype {} not supported'.format(type(dataset)))
def flatten_to_real(ds_main, lazy=False):
"""
Flattens complex / compound / real valued arrays to real valued arrays
Parameters
----------
ds_main : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Compound, complex or real valued numpy array or HDF5 dataset
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_main : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
Array raveled to a float data type
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> num_elems = 5
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> structured_array = np.zeros(shape=num_elems, dtype=struct_dtype)
>>> structured_array['r'] = np.random.random(size=num_elems) * 1024
>>> structured_array['g'] = np.random.randint(0, high=1024, size=num_elems)
>>> structured_array['b'] = np.random.random(size=num_elems) * 1024
    >>> print('Structured array is of shape {} and has values:'.format(structured_array.shape))
>>> print(structured_array)
    Structured array is of shape (5,) and has values:
[(859.62445, 54, 1012.22256219) (959.5565 , 678, 296.19788769)
(383.20737, 689, 192.45427816) (201.56635, 889, 939.01082338)
(334.22015, 467, 980.9081472 )]
>>> real_array = sidpy.dtype_utils.flatten_to_real(structured_array)
>>> print('This array converted to regular scalar matrix has shape: {} and values:'.format(real_array.shape))
>>> print(real_array)
This array converted to regular scalar matrix has shape: (15,) and values:
[ 859.62445068 959.55651855 383.20736694 201.56634521 334.22015381
54. 678. 689. 889. 467.
1012.22256219 296.19788769 192.45427816 939.01082338 980.9081472 ]
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray, da.core.Array)):
ds_main = np.array(ds_main)
if is_complex_dtype(ds_main.dtype):
return flatten_complex_to_real(ds_main, lazy=lazy)
elif len(ds_main.dtype) > 0:
return flatten_compound_to_real(ds_main, lazy=lazy)
else:
return ds_main
def get_compound_sub_dtypes(struct_dtype):
"""
Returns a dictionary of the dtypes of each of the fields in the given structured array dtype
Parameters
----------
struct_dtype : :class:`numpy.dtype`
dtype of a structured array
Returns
-------
dtypes : dict
Dictionary whose keys are the field names and values are the corresponding dtypes
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> sub_dtypes = sidpy.dtype_utils.get_compound_sub_dtypes(struct_dtype)
>>> for key, val in sub_dtypes.items():
>>> print('{} : {}'.format(key, val))
g : uint16
r : float32
b : float64
"""
if not isinstance(struct_dtype, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
dtypes = dict()
for field_name in struct_dtype.fields:
dtypes[field_name] = struct_dtype.fields[field_name][0]
return dtypes
def check_dtype(h5_dset):
"""
Checks the datatype of the input HDF5 dataset and provides the appropriate
function calls to convert it to a float
Parameters
----------
h5_dset : :class:`h5py.Dataset`
Dataset of interest
Returns
-------
func : callable
function that will convert the dataset to a float
is_complex : bool
is the input dataset complex?
is_compound : bool
is the input dataset compound?
n_features : Unsigned int
Unsigned integer - the length of the 2nd dimension of the data after `func` is called on it
type_mult : Unsigned int
multiplier that converts from the typesize of the input :class:`~numpy.dtype` to the
typesize of the data after func is run on it
Examples
--------
>>> import numpy as np
>>> import h5py
>>> import sidpy
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> file_path = 'dtype_utils_example.h5'
>>> if os.path.exists(file_path):
>>> os.remove(file_path)
>>> with h5py.File(file_path, mode='w') as h5_f:
>>> num_elems = (5, 7)
>>> structured_array = np.zeros(shape=num_elems, dtype=struct_dtype)
>>> structured_array['r'] = 450 * np.random.random(size=num_elems)
>>> structured_array['g'] = np.random.randint(0, high=1024, size=num_elems)
>>> structured_array['b'] = 3178 * np.random.random(size=num_elems)
>>> _ = h5_f.create_dataset('compound', data=structured_array)
>>> _ = h5_f.create_dataset('real', data=450 * np.random.random(size=num_elems), dtype=np.float16)
>>> _ = h5_f.create_dataset('complex', data=np.random.random(size=num_elems) + 1j * np.random.random(size=num_elems),
>>> dtype=np.complex64)
>>> h5_f.flush()
    >>> # Now, let's test the function on compound-, complex-, and real-valued HDF5 datasets:
>>> def check_dataset(h5_dset):
>>> print('\tDataset being tested: {}'.format(h5_dset))
>>> func, is_complex, is_compound, n_features, type_mult = sidpy.dtype_utils.check_dtype(h5_dset)
>>> print('\tFunction to transform to real: %s' % func)
>>> print('\tis_complex? %s' % is_complex)
>>> print('\tis_compound? %s' % is_compound)
>>> print('\tShape of dataset in its current form: {}'.format(h5_dset.shape))
>>> print('\tAfter flattening to real, shape is expected to be: ({}, {})'.format(h5_dset.shape[0], n_features))
>>> print('\tByte-size of a single element in its current form: {}'.format(type_mult))
>>> with h5py.File(file_path, mode='r') as h5_f:
>>> print('Checking a compound-valued dataset:')
>>> check_dataset(h5_f['compound'])
>>> print('')
>>> print('Checking a complex-valued dataset:')
>>> check_dataset(h5_f['complex'])
>>> print('')
>>> print('Checking a real-valued dataset:')
>>> check_dataset(h5_f['real'])
>>> os.remove(file_path)
Checking a compound-valued dataset:
Dataset being tested: <HDF5 dataset "compound": shape (5, 7), type "|V14">
Function to transform to real: <function flatten_compound_to_real at 0x112c130d0>
is_complex? False
is_compound? True
Shape of dataset in its current form: (5, 7)
After flattening to real, shape is expected to be: (5, 21)
Byte-size of a single element in its current form: 12
- - - - - - - - - - - - - - - - - -
Checking a complex-valued dataset:
Dataset being tested: <HDF5 dataset "complex": shape (5, 7), type "<c8">
Function to transform to real: <function flatten_complex_to_real at 0x112c13048>
is_complex? True
is_compound? False
Shape of dataset in its current form: (5, 7)
After flattening to real, shape is expected to be: (5, 14)
Byte-size of a single element in its current form: 8
- - - - - - - - - - - - - - - - - -
Checking a real-valued dataset:
Dataset being tested: <HDF5 dataset "real": shape (5, 7), type "<f2">
Function to transform to real: <class 'numpy.float32'>
is_complex? False
is_compound? False
Shape of dataset in its current form: (5, 7)
After flattening to real, shape is expected to be: (5, 7)
Byte-size of a single element in its current form: 4
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
is_complex = False
is_compound = False
in_dtype = h5_dset.dtype
# TODO: avoid assuming 2d shape - why does one even need n_samples!? We only care about the last dimension!
n_features = h5_dset.shape[-1]
if is_complex_dtype(h5_dset.dtype):
is_complex = True
new_dtype = np.real(h5_dset[0, 0]).dtype
type_mult = new_dtype.itemsize * 2
func = flatten_complex_to_real
n_features *= 2
elif len(h5_dset.dtype) > 0:
"""
Some form of structured numpy is in use
We only support real scalars for the component types at the current time
"""
is_compound = True
# TODO: Avoid hard-coding to float32
new_dtype = np.float32
type_mult = len(in_dtype) * new_dtype(0).itemsize
func = flatten_compound_to_real
n_features *= len(in_dtype)
else:
if h5_dset.dtype not in [np.float32, np.float64]:
new_dtype = np.float32
else:
new_dtype = h5_dset.dtype.type
type_mult = new_dtype(0).itemsize
func = new_dtype
return func, is_complex, is_compound, n_features, type_mult
def stack_real_to_complex(ds_real, lazy=False):
"""
Puts the real and imaginary sections of the provided matrix (in the last axis) together to make complex matrix
Parameters
------------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, 2 x features],
where the first half of the features are the real component and the
second half contains the imaginary components
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
2D complex array arranged as [sample, features]
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> real_val = np.hstack([5 * np.random.rand(6),
>>> 7 * np.random.rand(6)])
>>> print('Real valued dataset of shape {}:'.format(real_val.shape))
>>> print(real_val)
Real valued dataset of shape (12,):
[3.59249723 1.05674621 4.41035214 1.84720102 1.79672691 4.7636207
3.09574246 0.76396171 3.38140637 4.97629028 0.83303717 0.32816285]
>>> comp_val = sidpy.dtype_utils.stack_real_to_complex(real_val)
>>> print('Complex-valued array of shape: {}'.format(comp_val.shape))
>>> print(comp_val)
Complex-valued array of shape: (6,)
[3.59249723+3.09574246j 1.05674621+0.76396171j 4.41035214+3.38140637j
1.84720102+4.97629028j 1.79672691+0.83303717j 4.7636207 +0.32816285j]
"""
if not isinstance(ds_real, (np.ndarray, da.core.Array, h5py.Dataset)):
if not isinstance(ds_real, (tuple, list)):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
if is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if ds_real.shape[-1] / 2 != ds_real.shape[-1] // 2:
raise ValueError("Last dimension must be even sized")
half_point = ds_real.shape[-1] // 2
if isinstance(ds_real, da.core.Array):
lazy = True
if lazy and not isinstance(ds_real, da.core.Array):
ds_real = lazy_load_array(ds_real)
return ds_real[..., :half_point] + 1j * ds_real[..., half_point:]
def stack_real_to_compound(ds_real, compound_type, lazy=False):
"""
Converts a real-valued dataset to a compound dataset (along the last axis) of the provided compound d-type
Parameters
------------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, features]
compound_type : :class:`numpy.dtype`
        Target compound data-type
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
        N-dimensional compound-valued array arranged as [sample, features]
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> num_elems = 5
>>> real_val = np.concatenate((np.random.random(size=num_elems) * 1024,
>>> np.random.randint(0, high=1024, size=num_elems),
>>> np.random.random(size=num_elems) * 1024))
>>> print('Real valued dataset of shape {}:'.format(real_val.shape))
>>> print(real_val)
Real valued dataset of shape (15,):
[276.65339095 527.80665658 741.38145798 647.06743252 710.41729083
380. 796. 504. 355. 985.
960.70015068 567.47024098 881.25140299 105.48936013 933.13686734]
>>> comp_val = sidpy.dtype_utils.stack_real_to_compound(real_val, struct_dtype)
>>> print('Structured array of shape: {}'.format(comp_val.shape))
>>> print(comp_val)
Structured array of shape: (5,)
[(276.65338, 380, 960.70015068) (527.80664, 796, 567.47024098)
(741.3815 , 504, 881.25140299) (647.06744, 355, 105.48936013)
(710.4173 , 985, 933.13686734)]
"""
if lazy or isinstance(ds_real, da.core.Array):
raise NotImplementedError('Lazy operation not available due to absence of Dask support')
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, (list, tuple)):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
elif is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if not isinstance(compound_type, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
new_spec_length = ds_real.shape[-1] / len(compound_type)
if new_spec_length % 1:
raise ValueError('Provided compound type was not compatible by number of elements')
new_spec_length = int(new_spec_length)
new_shape = list(ds_real.shape) # Make mutable
new_shape[-1] = new_spec_length
xp = np
kwargs = {}
"""
if isinstance(ds_real, h5py.Dataset) and not lazy:
warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')
if isinstance(ds_real, da.core.Array):
lazy = True
if lazy:
xp = da
ds_real = lazy_load_array(ds_real)
kwargs = {'chunks': 'auto'}
"""
ds_compound = xp.empty(new_shape, dtype=compound_type, **kwargs)
for name_ind, name in enumerate(compound_type.names):
i_start = name_ind * new_spec_length
i_end = (name_ind + 1) * new_spec_length
ds_compound[name] = ds_real[..., i_start:i_end]
return ds_compound.squeeze()
def stack_real_to_target_dtype(ds_real, new_dtype, lazy=False):
"""
Transforms real data into the target dtype
Parameters
----------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array` or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset
new_dtype : :class:`numpy.dtype`
Target data-type
Returns
----------
ret_val : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
N-dimensional array of the target data-type
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> num_elems = 5
>>> real_val = np.concatenate((np.random.random(size=num_elems) * 1024,
>>> np.random.randint(0, high=1024, size=num_elems),
>>> np.random.random(size=num_elems) * 1024))
>>> print('Real valued dataset of shape {}:'.format(real_val.shape))
>>> print(real_val)
Real valued dataset of shape (15,):
[276.65339095 527.80665658 741.38145798 647.06743252 710.41729083
380. 796. 504. 355. 985.
960.70015068 567.47024098 881.25140299 105.48936013 933.13686734]
>>> comp_val = sidpy.dtype_utils.stack_real_to_target_dtype(real_val, struct_dtype)
>>> print('Structured array of shape: {}'.format(comp_val.shape))
>>> print(comp_val)
Structured array of shape: (5,)
[(276.65338, 380, 960.70015068) (527.80664, 796, 567.47024098)
(741.3815 , 504, 881.25140299) (647.06744, 355, 105.48936013)
(710.4173 , 985, 933.13686734)]
"""
if is_complex_dtype(new_dtype):
return stack_real_to_complex(ds_real, lazy=lazy)
try:
if len(new_dtype) > 0:
return stack_real_to_compound(ds_real, new_dtype, lazy=lazy)
except TypeError:
return new_dtype(ds_real)
# catching all other cases, such as np.dtype('<f4')
return new_dtype.type(ds_real)
def validate_dtype(dtype):
"""
Checks the provided object to ensure that it is a valid dtype that can be written to an HDF5 file.
Raises a type error if invalid. Returns True if the object passed the tests
Parameters
----------
dtype : object
Object that is hopefully a :class:`h5py.Datatype`, or :class:`numpy.dtype` object
Returns
-------
status : bool
True if the object was a valid data-type
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> for item in [np.float16, np.complex64, np.uint8, np.int16]:
>>> print('Is {} a valid dtype? : {}'.format(item, sidpy.dtype_utils.validate_dtype(item)))
Is <class 'numpy.float16'> a valid dtype? : True
Is <class 'numpy.complex64'> a valid dtype? : True
Is <class 'numpy.uint8'> a valid dtype? : True
Is <class 'numpy.int16'> a valid dtype? : True
# This function is especially useful on compound or structured data types:
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
>>> print('Is {} a valid dtype? : {}'.format(struct_dtype, sidpy.dtype_utils.validate_dtype(struct_dtype)))
Is [('r', '<f4'), ('g', '<u2'), ('b', '<f8')] a valid dtype? : True
"""
if isinstance(dtype, (h5py.Datatype, np.dtype)):
pass
elif isinstance(np.dtype(dtype), np.dtype):
# This should catch all those instances when dtype is something familiar like - np.float32
pass
else:
raise TypeError('dtype should either be a numpy or h5py dtype')
return True
def is_complex_dtype(dtype):
"""
Checks if the provided dtype is a complex dtype
Parameters
----------
dtype : object
Object that is a class:`h5py.Datatype`, or :class:`numpy.dtype` object
Returns
-------
is_complex : bool
True if the dtype was a complex dtype. Else returns False
Examples
--------
>>> import numpy as np
>>> import sidpy
>>> for dtype in [np.float32, np.float16, np.uint8, np.int16, bool]:
>>> print('Is {} a complex dtype?: {}'.format(dtype, (sidpy.dtype_utils.is_complex_dtype(dtype))))
Is <class 'numpy.float32'> a complex dtype?: False
Is <class 'numpy.float16'> a complex dtype?: False
Is <class 'numpy.uint8'> a complex dtype?: False
Is <class 'numpy.int16'> a complex dtype?: False
Is <class 'bool'> a complex dtype?: False
>>> struct_dtype = np.dtype({'names': ['r', 'g', 'b'],
>>> 'formats': [np.float32, np.uint16, np.float64]})
    >>> print('Is {} a complex dtype?: {}'.format(struct_dtype, (sidpy.dtype_utils.is_complex_dtype(struct_dtype))))
    Is [('r', '<f4'), ('g', '<u2'), ('b', '<f8')] a complex dtype?: False
>>> for dtype in [np.complex, np.complex64, np.complex128, np.complex256]:
>>> print('Is {} a complex dtype?: {}'.format(dtype, (sidpy.dtype_utils.is_complex_dtype(dtype))))
Is <class 'complex'> a complex dtype?: True
Is <class 'numpy.complex64'> a complex dtype?: True
Is <class 'numpy.complex128'> a complex dtype?: True
Is <class 'numpy.complex256'> a complex dtype?: False
"""
validate_dtype(dtype)
if dtype in [np.complex, np.complex64, np.complex128]:
return True
return False
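# --- Round-trip sketch for the helpers above (assumes sidpy is installed) ---
# flatten_complex_to_real stacks real parts then imaginary parts along the last
# axis; stack_real_to_complex reverses that, so the pair is a lossless round trip.
import numpy as np
import sidpy

complex_arr = np.array([1 + 2j, 3 + 4j, 5 + 6j])
flat = sidpy.dtype_utils.flatten_complex_to_real(complex_arr)   # [1. 3. 5. 2. 4. 6.]
restored = sidpy.dtype_utils.stack_real_to_complex(flat)
assert np.allclose(restored, complex_arr)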
| 41.12708
| 143
| 0.634872
|
ba65fc8806ad47ab257dc5956c5ff0cb759e6f83
| 2,603
|
py
|
Python
|
pydrr/GeometryContext.py
|
yuta-hi/pycuda_drr
|
08cc20c651d4288be67c40e4936f4280b04eec6f
|
[
"MIT"
] | 16
|
2019-12-14T09:24:43.000Z
|
2022-03-11T04:31:48.000Z
|
pydrr/GeometryContext.py
|
yuta-hi/pycuda_drr
|
08cc20c651d4288be67c40e4936f4280b04eec6f
|
[
"MIT"
] | 6
|
2020-07-05T16:59:45.000Z
|
2021-04-21T11:20:31.000Z
|
pydrr/GeometryContext.py
|
yuta-hi/pycuda_drr
|
08cc20c651d4288be67c40e4936f4280b04eec6f
|
[
"MIT"
] | 2
|
2020-07-13T09:11:44.000Z
|
2022-02-20T07:14:04.000Z
|
import numpy as np
from . import utils
class GeometryContext:
def __init__(self):
self.SOD_ = 0.0
self.SDD_ = 0.0
self.pixel_spacing_ = (1.0, 1.0)
self.image_size_ = (1024, 1024)
self.view_matrix_ = np.eye(4,4, dtype=np.float32)
self.intrinsic_ = None
self.extrinsic_ = None
self.projection_matrix_ = None
@property
def SOD(self):
return self.SOD_
@SOD.setter
def SOD(self, value):
self.intrinsic = None
self.SOD_ = value
@property
def SDD(self):
return self.SDD_
@SDD.setter
def SDD(self, value):
self.intrinsic = None
self.extrinsic = None
self.SDD_ = value
@property
def pixel_spacing(self):
return self.pixel_spacing_
@pixel_spacing.setter
def pixel_spacing(self, value):
self.intrinsic = None
self.pixel_spacing_ = value
@property
def image_size(self):
return self.image_size_
@image_size.setter
def image_size(self, value):
self.intrinsic = None
self.image_size_ = value
@property
def view_matrix(self):
return self.view_matrix_
@view_matrix.setter
def view_matrix(self, value):
self.extrinsic = None
self.view_matrix_ = value
@property
def intrinsic(self):
if self.intrinsic_ is None:
self.intrinsic_ = np.array([
[ self.SOD / self.pixel_spacing[0], 0, self.image_size[0] / 2 ],
[ 0, self.SOD / self.pixel_spacing[1], self.image_size[1] / 2 ],
[0, 0, 1]
])
return self.intrinsic_
@intrinsic.setter
def intrinsic(self, new_intrinsic):
self.projection_matrix = None
self.intrinsic_ = new_intrinsic
@property
def extrinsic(self):
if self.extrinsic_ is None:
extrinsic_T = utils.convertTransRotTo4x4([0, 0, -self.SOD, 0, 0, 0])
self.extrinsic_ = utils.concatenate4x4(extrinsic_T, self.view_matrix)
return self.extrinsic_
@extrinsic.setter
def extrinsic(self, new_extrinsic):
self.projection_matrix = None
self.extrinsic_ = new_extrinsic
@property
def projection_matrix(self):
if self.projection_matrix_ is None:
self.projection_matrix_ = utils.constructProjectionMatrix(self.intrinsic, self.extrinsic)
return self.projection_matrix_
@projection_matrix.setter
def projection_matrix(self, value):
self.projection_matrix_ = value
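# --- Usage sketch (values are arbitrary; not part of the class above) ---
# Setting any geometry parameter invalidates the cached matrices, and the
# intrinsic matrix is rebuilt lazily the next time it is read.
context = GeometryContext()
context.SOD = 1000.0                # source-object distance
context.SDD = 1500.0                # source-detector distance
context.pixel_spacing = (0.5, 0.5)
context.image_size = (512, 512)
K = context.intrinsic               # 3x3 matrix derived from SOD, spacing and size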
| 26.03
| 101
| 0.609681
|
9026659b28aff9c04d705f4afae884168fcdbbb2
| 3,626
|
py
|
Python
|
backend/apps/volontulo/tests/views/api/organizations/test_list.py
|
Simon323/volontulo
|
7a9eb2d201a1745ad18dd79c9ff4014031dbd195
|
[
"MIT"
] | null | null | null |
backend/apps/volontulo/tests/views/api/organizations/test_list.py
|
Simon323/volontulo
|
7a9eb2d201a1745ad18dd79c9ff4014031dbd195
|
[
"MIT"
] | null | null | null |
backend/apps/volontulo/tests/views/api/organizations/test_list.py
|
Simon323/volontulo
|
7a9eb2d201a1745ad18dd79c9ff4014031dbd195
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
.. module:: test_list
"""
from rest_framework import status
from rest_framework.test import APITestCase
from apps.volontulo.tests.views.offers.commons import TestOffersCommons
class _TestOrganizationsListAPIView(TestOffersCommons, APITestCase):
"""Tests for REST API's list organizations view."""
def test_organization_list_fields(self):
"""Test list's fields of organization REST API endpoint."""
response = self.client.get('/api/organizations/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
for organization in response.data:
self.assertIsInstance(organization.pop('id'), int)
self.assertIsInstance(organization.pop('name'), str)
self.assertIsInstance(organization.pop('slug'), str)
self.assertIsInstance(organization.pop('url'), str)
self.assertEqual(len(organization), 0)
class TestAdminUserOrganizationsListAPIView(_TestOrganizationsListAPIView):
"""Tests for REST API's list organizations view for admin user."""
def setUp(self):
"""Set up each test."""
super(TestAdminUserOrganizationsListAPIView, self).setUp()
self.client.login(username='admin@example.com', password='123admin')
def test_organization_list_length(self):
"""Test organizations list length for admin user.
Organizations are readable for everyone.
"""
response = self.client.get('/api/organizations/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
class TestOrganizationUserOrganizationsListAPIView(
_TestOrganizationsListAPIView):
"""Tests for API's list organizations view for user with organization."""
def setUp(self):
"""Set up each test."""
super(TestOrganizationUserOrganizationsListAPIView, self).setUp()
self.client.login(
username='cls.organization@example.com',
password='123org'
)
def test_organization_list_length(self):
"""Test organizations list length for user with organization.
Organizations are readable for everyone.
"""
response = self.client.get('/api/organizations/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
class TestRegularUserOrganizationsListAPIView(_TestOrganizationsListAPIView):
"""Tests for REST API's list organizations view for regular user."""
def setUp(self):
"""Set up each test."""
super(TestRegularUserOrganizationsListAPIView, self).setUp()
self.client.login(
username='volunteer@example.com',
password='123volunteer'
)
def test_organization_list_length(self):
"""Test organizations list length for regular user.
Organizations are readable for everyone.
"""
response = self.client.get('/api/organizations/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
class TestAnonymousUserOrganizationsListAPIView(_TestOrganizationsListAPIView):
"""Tests for REST API's list organizations view for anonymous user."""
def test_organization_list_length(self):
"""Test organizations list length for anonymous user.
Organizations are readable for everyone.
"""
response = self.client.get('/api/organizations/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
| 33.266055
| 79
| 0.690292
|
86d248e9f8fcd095ebd3753ddcdef27c368a9f7d
| 1,362
|
py
|
Python
|
sdk/ingestion/microsoft/bing/commerce/ingestion/models/schema_detection_response.py
|
microsoft/bing-commerce-sdk-for-python
|
cf555ea0bb14792708617d2435dd5aab1c4cbe90
|
[
"MIT"
] | 1
|
2020-05-04T09:58:07.000Z
|
2020-05-04T09:58:07.000Z
|
sdk/ingestion/microsoft/bing/commerce/ingestion/models/schema_detection_response.py
|
microsoft/bing-commerce-sdk-for-python
|
cf555ea0bb14792708617d2435dd5aab1c4cbe90
|
[
"MIT"
] | 2
|
2020-04-21T02:32:32.000Z
|
2020-04-21T19:37:54.000Z
|
sdk/ingestion/microsoft/bing/commerce/ingestion/models/schema_detection_response.py
|
microsoft/bing-commerce-sdk-for-python
|
cf555ea0bb14792708617d2435dd5aab1c4cbe90
|
[
"MIT"
] | 4
|
2020-07-31T10:39:22.000Z
|
2021-11-10T08:14:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SchemaDetectionResponse(Model):
"""SchemaDetectionResponse.
:param index:
:type index: ~microsoft.bing.commerce.ingestion.models.ResponseIndex
:param transformation_config:
:type transformation_config:
~microsoft.bing.commerce.ingestion.models.TransformationConfigResponse
:param warnings:
:type warnings: list[str]
"""
_attribute_map = {
'index': {'key': 'index', 'type': 'ResponseIndex'},
'transformation_config': {'key': 'transformationConfig', 'type': 'TransformationConfigResponse'},
'warnings': {'key': 'warnings', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(SchemaDetectionResponse, self).__init__(**kwargs)
self.index = kwargs.get('index', None)
self.transformation_config = kwargs.get('transformation_config', None)
self.warnings = kwargs.get('warnings', None)
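# --- Construction sketch (illustrative values only) ---
# msrest models take their attributes as keyword arguments, so a response can be
# built directly or left empty and populated by the deserializer.
response = SchemaDetectionResponse(warnings=['column "price" has mixed types'])
assert response.index is None
assert response.warnings == ['column "price" has mixed types']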
| 36.810811
| 106
| 0.599119
|
d6fb88d579e9ae53329cd82825a8be3f1b9db84e
| 2,896
|
py
|
Python
|
tests/integration/offer/shipping_benefit_tests.py
|
ahmetdaglarbas/e-commerce
|
ff190244ccd422b4e08d7672f50709edcbb6ebba
|
[
"BSD-3-Clause"
] | 2
|
2015-12-11T00:19:15.000Z
|
2021-11-14T19:44:42.000Z
|
tests/integration/offer/shipping_benefit_tests.py
|
ahmetdaglarbas/e-commerce
|
ff190244ccd422b4e08d7672f50709edcbb6ebba
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/offer/shipping_benefit_tests.py
|
ahmetdaglarbas/e-commerce
|
ff190244ccd422b4e08d7672f50709edcbb6ebba
|
[
"BSD-3-Clause"
] | null | null | null |
from decimal import Decimal as D
from django.test import TestCase
from django.test.client import RequestFactory
import mock
from oscar.apps.offer import models, utils
from oscar.apps.order.utils import OrderCreator
from oscar.apps.shipping.repository import Repository
from oscar.apps.shipping.methods import FixedPrice
from oscar.test.basket import add_product
from oscar.test import factories
def create_offer():
range = models.Range.objects.create(
name="All products", includes_all_products=True)
condition = models.CountCondition.objects.create(
range=range,
type=models.Condition.COUNT,
value=1)
benefit = models.ShippingFixedPriceBenefit.objects.create(
type=models.Benefit.SHIPPING_FIXED_PRICE,
value=D('1.00'))
return models.ConditionalOffer.objects.create(
condition=condition,
benefit=benefit,
offer_type=models.ConditionalOffer.SITE)
def apply_offers(basket):
req = RequestFactory().get('/')
req.user = mock.Mock()
utils.Applicator().apply(req, basket)
class StubRepository(Repository):
"""
    Stubbed shipping repository which overrides the get_shipping_methods method
in order to use a non-free default shipping method. This allows the
shipping discounts to be tested.
"""
methods = [FixedPrice(D('10.00'), D('10.00'))]
class TestAnOfferWithAShippingBenefit(TestCase):
def setUp(self):
self.basket = factories.create_basket(empty=True)
create_offer()
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D('12.00'))
apply_offers(self.basket)
self.assertEqual(1, len(self.basket.offer_applications))
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D('12.00'), 2)
apply_offers(self.basket)
self.assertEqual(1, len(self.basket.offer_applications))
def test_wraps_shipping_method_from_repository(self):
add_product(self.basket, D('12.00'), 1)
apply_offers(self.basket)
methods = StubRepository().get_shipping_methods(self.basket)
method = methods[0]
charge = method.calculate(self.basket)
self.assertEqual(D('1.00'), charge.incl_tax)
def test_has_discount_recorded_correctly_when_order_is_placed(self):
add_product(self.basket, D('12.00'), 1)
apply_offers(self.basket)
methods = StubRepository().get_shipping_methods(self.basket)
method = methods[0]
order = factories.create_order(basket=self.basket,
shipping_method=method)
discounts = order.discounts.all()
self.assertEqual(1, len(discounts))
discount = discounts[0]
self.assertTrue(discount.is_shipping_discount)
self.assertEqual(D('9.00'), discount.amount)
| 34.070588
| 78
| 0.703729
|
4d2488c387475fed0f30a9f48dc558b6d00efa92
| 4,297
|
py
|
Python
|
salt/loader_context.py
|
JochemGit/salt
|
d159b93ca77be3ec04cc5d349be82c210ffeb49d
|
[
"Apache-2.0"
] | 2
|
2015-08-21T01:05:03.000Z
|
2015-09-02T07:30:45.000Z
|
salt/loader_context.py
|
JochemGit/salt
|
d159b93ca77be3ec04cc5d349be82c210ffeb49d
|
[
"Apache-2.0"
] | 4
|
2021-02-06T14:30:48.000Z
|
2021-12-13T20:50:10.000Z
|
salt/loader_context.py
|
JochemGit/salt
|
d159b93ca77be3ec04cc5d349be82c210ffeb49d
|
[
"Apache-2.0"
] | 1
|
2020-06-02T14:15:24.000Z
|
2020-06-02T14:15:24.000Z
|
"""
Manage the context a module loaded by Salt's loader
"""
import collections.abc
import contextlib
import contextvars
DEFAULT_CTX_VAR = "loader_ctxvar"
loader_ctxvar = contextvars.ContextVar(DEFAULT_CTX_VAR)
@contextlib.contextmanager
def loader_context(loader):
"""
A context manager that sets and un-sets the loader context
"""
tok = loader_ctxvar.set(loader)
try:
yield
finally:
loader_ctxvar.reset(tok)
class NamedLoaderContext(collections.abc.MutableMapping):
"""
A NamedLoaderContext object is injected by the loader providing access to
Salt's 'magic dunders' (__salt__, __utils__, ect).
"""
def __init__(self, name, loader_context, default=None):
self.name = name
self.loader_context = loader_context
self.default = default
def loader(self):
"""
The LazyLoader in the current context. This will return None if there
is no context established.
"""
try:
return self.loader_context.loader()
except AttributeError:
return None
def eldest_loader(self):
if self.loader() is None:
return None
loader = self.loader()
while loader.parent_loader is not None:
loader = loader.parent_loader
return loader
def value(self):
"""
        The value of this named context for the currently active loader
"""
loader = self.loader()
if loader is None:
return self.default
if self.name == "__context__":
return loader.pack[self.name]
if self.name == loader.pack_self:
return loader
return loader.pack[self.name]
def get(self, key, default=None):
return self.value().get(key, default)
def __getitem__(self, item):
return self.value()[item]
def __contains__(self, item):
return item in self.value()
def __setitem__(self, item, value):
self.value()[item] = value
def __bool__(self):
try:
self.loader
except LookupError:
return False
return True
def __len__(self):
return self.value().__len__()
def __iter__(self):
return self.value().__iter__()
def __delitem__(self, item):
return self.value().__delitem__(item)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.loader_context == other.loader_context and self.name == other.name
def __getstate__(self):
return {
"name": self.name,
"loader_context": self.loader_context,
"default": None,
}
def __setstate__(self, state):
self.name = state["name"]
self.loader_context = state["loader_context"]
self.default = state["default"]
def __getattr__(self, name):
return getattr(self.value(), name)
def missing_fun_string(self, name):
return self.loader().missing_fun_string(name)
class LoaderContext:
"""
A loader context object, this object is injected at <loaded
module>.__salt_loader__ by the Salt loader. It is responsible for providing
access to the current context's loader
"""
def __init__(self, loader_ctxvar=loader_ctxvar):
self.loader_ctxvar = loader_ctxvar
def __getitem__(self, item):
return self.loader[item]
def loader(self):
"""
        Return the LazyLoader in the current context. If there is no value set, raise an AttributeError.
"""
try:
return self.loader_ctxvar.get()
except LookupError:
raise AttributeError("No loader context")
def named_context(self, name, default=None, ctx_class=NamedLoaderContext):
"""
Return a NamedLoaderContext instance which will use this LoaderContext
"""
return ctx_class(name, self, default)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.loader_ctxvar == other.loader_ctxvar
def __getstate__(self):
return {"varname": self.loader_ctxvar.name}
def __setstate__(self, state):
self.loader_ctxvar = contextvars.ContextVar(state["varname"])
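# --- Behaviour sketch with a stand-in loader (not part of Salt itself) ---
# _FakeLoader only mimics the attributes NamedLoaderContext.value() touches, so the
# context-variable plumbing can be exercised without a real LazyLoader.
class _FakeLoader:
    pack = {"__salt__": {"test.ping": lambda: True}}
    pack_self = None
    parent_loader = None

_ctx = LoaderContext()
_dunder_salt = _ctx.named_context("__salt__")
with loader_context(_FakeLoader()):
    assert _dunder_salt["test.ping"]() is True  # resolved through the active loader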
| 27.369427
| 102
| 0.627647
|
752a41d5e66914345312c169dcc5466b22a36637
| 1,515
|
py
|
Python
|
ppci/arch/jvm/printer.py
|
rakati/ppci-mirror
|
8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2
|
[
"BSD-2-Clause"
] | 161
|
2020-05-31T03:29:42.000Z
|
2022-03-07T08:36:19.000Z
|
ppci/arch/jvm/printer.py
|
rakati/ppci-mirror
|
8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2
|
[
"BSD-2-Clause"
] | 74
|
2020-05-26T18:05:48.000Z
|
2021-02-13T21:55:39.000Z
|
ppci/arch/jvm/printer.py
|
rakati/ppci-mirror
|
8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2
|
[
"BSD-2-Clause"
] | 19
|
2020-05-27T19:22:11.000Z
|
2022-02-17T18:53:52.000Z
|
""" Functions to print class contents in a verbose way.
"""
def print_class_file(class_file):
""" Dump a class file. """
ClassFilePrinter(class_file)
class ClassFilePrinter:
def __init__(self, class_file):
self.class_file = class_file
class_info = class_file.get_constant(class_file.this_class)
class_name = class_file.get_name(class_info.value)
print("class {}".format(class_name))
print(" minor version:", class_file.minor_version)
print(" major version:", class_file.major_version)
print(" flags:", class_file.access_flags)
print("Constant pool:")
for i, value in enumerate(class_file.constant_pool):
if not value:
continue
print(" #{} = {} {}".format(i, value.tag.name, value.value))
print("{")
for field in class_file.fields:
print(field)
self.print_attributes(" ", field.attributes)
for method in class_file.methods:
print(" {};".format(method.name))
print(" descriptor:", method.descriptor)
print(" flags:", method.access_flags)
self.print_attributes(" ", method.attributes)
print("}")
self.print_attributes("", class_file.attributes)
def print_attributes(self, indent, attributes):
for attribute in attributes:
name = attribute.name
value = attribute
print("{}{}: {}".format(indent, name, value))
| 32.934783
| 74
| 0.60132
|
fe3a3a9bd27d57285d7b190b681d0431c6e73d74
| 806
|
py
|
Python
|
Ex075.py
|
raphaeltertuliano/Python
|
ffa9813aaa13ccca807f7c08be9489a2d88d3d62
|
[
"MIT"
] | 1
|
2021-11-23T21:38:46.000Z
|
2021-11-23T21:38:46.000Z
|
Ex075.py
|
raphaeltertuliano/Python
|
ffa9813aaa13ccca807f7c08be9489a2d88d3d62
|
[
"MIT"
] | null | null | null |
Ex075.py
|
raphaeltertuliano/Python
|
ffa9813aaa13ccca807f7c08be9489a2d88d3d62
|
[
"MIT"
] | null | null | null |
# Develop a program that reads four values from the keyboard
# and stores them in a tuple. At the end, show:
# A) How many times the value 9 appeared
# B) The position where the first value 3 was entered
# C) Which numbers were even
num = (int(input('Digite um número: ')),
int(input('Digite um número: ')),
int(input('Digite um número: ')),
int(input('Digite um número: ')))
print(f'A lista é: {num}')
if num.count(9) == 0:
print('Não foi digitado nenhum número 9')
else:
print(f'O número 9 foi digitado {num.count(9)} vezes')
if 3 in num:
print(f'O número 3 aparece pela primeira vez na posição {num.index(3)+1}')
else:
print('Não foi digitado nenhum número 3')
print('Os números pares digitados foram: ', end='')
for n in num:
if n % 2 == 0:
print(n, end=' ')
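# --- Quick illustration (not part of the exercise) of the tuple methods used above ---
sample = (9, 3, 4, 9)
assert sample.count(9) == 2    # how many times 9 appears
assert sample.index(3) == 1    # zero-based position; the program prints index + 1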
| 33.583333
| 78
| 0.653846
|
f6fafb60a333f5ff97de5f1aed1948aed49dc4c7
| 979
|
py
|
Python
|
expyfun/__init__.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 2
|
2015-12-31T07:56:16.000Z
|
2016-08-22T17:23:02.000Z
|
expyfun/__init__.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 6
|
2015-02-18T04:25:46.000Z
|
2017-01-25T01:00:35.000Z
|
expyfun/__init__.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 1
|
2015-12-31T07:56:20.000Z
|
2015-12-31T07:56:20.000Z
|
"""
Experiment control
==================
Experiment control functions.
"""
from ._version import __version__
# have to import verbose first since it's needed by many things
from ._utils import (set_log_level, set_log_file, set_config, check_units,
get_config, get_config_path, fetch_data_file,
run_subprocess)
from ._utils import verbose_dec as verbose, building_doc
from ._git import assert_version, download_version
from ._experiment_controller import ExperimentController, get_keyboard_input
from ._eyelink_controller import EyelinkController
from ._sound_controllers import SoundCardController
from ._trigger_controllers import (decimals_to_binary, binary_to_decimals,
ParallelTrigger)
from ._tdt_controller import TDTController
from . import analyze
from . import codeblocks
from . import io
from . import stimuli
from . import _fixes
# INIT LOGGING
set_log_level(None, False)
set_log_file()
| 31.580645
| 76
| 0.754852
|
f3954927662d7fb9cd55f42b2cfd03b7ceac3990
| 1,309
|
py
|
Python
|
Lib/site-packages/sphinx/util/rst.py
|
TencentCodeDog/win-Python-v2.7.13
|
13dfc85fee2537ea352e6058bc25e6f94bb3f467
|
[
"bzip2-1.0.6"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
venv/Lib/site-packages/sphinx/util/rst.py
|
prats1997/Euphorum
|
16bfee9c71ea5b1332c6263233c79a633ddfdd83
|
[
"MIT"
] | 7
|
2020-03-24T15:50:06.000Z
|
2021-06-08T19:57:39.000Z
|
venv/Lib/site-packages/sphinx/util/rst.py
|
prats1997/Euphorum
|
16bfee9c71ea5b1332c6263233c79a633ddfdd83
|
[
"MIT"
] | 2
|
2018-07-19T16:53:00.000Z
|
2018-08-02T21:59:44.000Z
|
# -*- coding: utf-8 -*-
"""
sphinx.util.rst
~~~~~~~~~~~~~~~
reST helper functions.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
import re
from contextlib import contextmanager
from docutils.parsers.rst import roles
from docutils.parsers.rst.languages import en as english
from docutils.utils import Reporter
from sphinx.util import logging
if False:
# For type annotation
from typing import Generator # NOQA
symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])') # symbols without dot(0x2e)
logger = logging.getLogger(__name__)
def escape(text):
# type: (unicode) -> unicode
text = symbols_re.sub(r'\\\1', text) # type: ignore
text = re.sub(r'^\.', r'\.', text) # escape a dot at top
return text
@contextmanager
def default_role(docname, name):
# type: (unicode, unicode) -> Generator
if name:
dummy_reporter = Reporter('', 4, 4)
role_fn, _ = roles.role(name, english, 0, dummy_reporter)
if role_fn:
roles._roles[''] = role_fn
else:
logger.warning('default role %s not found', name, location=docname)
yield
roles._roles.pop('', None) # if a document has set a local default role
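# --- Quick illustration of escape() (not part of sphinx) ---
# reST markup characters are backslash-escaped and a leading dot is neutralised,
# so arbitrary text can be embedded in generated reST.
print(escape('*bold* and `code`'))   # -> \*bold\* and \`code\`
print(escape('.. not a directive'))  # -> \.. not a directive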
| 25.666667
| 79
| 0.647059
|
7985efa0f7c14f6239771375971c057dfa0f5a13
| 54,983
|
py
|
Python
|
gensrc/script/vectorized/vectorized_functions.py
|
niexiongchao/starRocks
|
afecea80e50ee505261fffc3cff5f546ee285475
|
[
"Zlib",
"PSF-2.0",
"Apache-2.0"
] | null | null | null |
gensrc/script/vectorized/vectorized_functions.py
|
niexiongchao/starRocks
|
afecea80e50ee505261fffc3cff5f546ee285475
|
[
"Zlib",
"PSF-2.0",
"Apache-2.0"
] | null | null | null |
gensrc/script/vectorized/vectorized_functions.py
|
niexiongchao/starRocks
|
afecea80e50ee505261fffc3cff5f546ee285475
|
[
"Zlib",
"PSF-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# This file is licensed under the Elastic License 2.0. Copyright 2021-present, StarRocks Limited.
# The format is:
# <function id> <name>, <return_type>, [<args>], <backend fn>,
# With an optional
# <prepare fn>, <close fn>
#
# example:
# [1, "add", "TINYINT", ["TINYINT", "TINYINT"], "Math::add", "Math::add_prepare", "Math::add_close"]
#
# the id rule: {module function group}|0|{function group}|{sub-function/alias-function}
#
# example round functions: 1 013 0 = 10130
# ^ ^ ^
# {math function} {function group} {sub-function}
vectorized_functions = [
# 10xxx: math functions
[10010, "pi", "DOUBLE", [], "MathFunctions::pi"],
[10020, "e", "DOUBLE", [], "MathFunctions::e"],
[10030, "sign", "FLOAT", ["DOUBLE"], "MathFunctions::sign"],
[10040, "abs", "DOUBLE", ["DOUBLE"], "MathFunctions::abs_double"],
[10041, "abs", "FLOAT", ["FLOAT"], "MathFunctions::abs_float"],
[10042, "abs", "LARGEINT", ["LARGEINT"], "MathFunctions::abs_largeint"],
[10043, "abs", "LARGEINT", ["BIGINT"], "MathFunctions::abs_bigint"],
[10044, "abs", "BIGINT", ["INT"], "MathFunctions::abs_int"],
[10045, "abs", "INT", ["SMALLINT"], "MathFunctions::abs_smallint"],
[10046, "abs", "SMALLINT", ["TINYINT"], "MathFunctions::abs_tinyint"],
[10047, "abs", "DECIMALV2", ["DECIMALV2"], "MathFunctions::abs_decimalv2val"],
[100470, "abs", "DECIMAL32", ["DECIMAL32"], "MathFunctions::abs_decimal32"],
[100471, "abs", "DECIMAL64", ["DECIMAL64"], "MathFunctions::abs_decimal64"],
[100472, "abs", "DECIMAL128", ["DECIMAL128"], "MathFunctions::abs_decimal128"],
[10050, "sin", "DOUBLE", ["DOUBLE"], "MathFunctions::sin"],
[10060, "asin", "DOUBLE", ["DOUBLE"], "MathFunctions::asin"],
[10070, "cos", "DOUBLE", ["DOUBLE"], "MathFunctions::cos"],
[10080, "acos", "DOUBLE", ["DOUBLE"], "MathFunctions::acos"],
[10090, "tan", "DOUBLE", ["DOUBLE"], "MathFunctions::tan"],
[10100, "atan", "DOUBLE", ["DOUBLE"], "MathFunctions::atan"],
[10110, "ceil", "BIGINT", ["DOUBLE"], "MathFunctions::ceil"],
[10111, "ceiling", "BIGINT", ["DOUBLE"], "MathFunctions::ceil"],
[10112, "dceil", "BIGINT", ["DOUBLE"], "MathFunctions::ceil"],
[10120, "floor", "BIGINT", ["DOUBLE"], "MathFunctions::floor"],
[10121, "dfloor", "BIGINT", ["DOUBLE"], "MathFunctions::floor"],
[10127, "round", "DECIMAL128", ["DECIMAL128"], "MathFunctions::round_decimal128"],
[10128, "round", "DECIMAL128", ["DECIMAL128", "INT"], "MathFunctions::round_up_to_decimal128"],
[10129, "truncate", "DECIMAL128", ["DECIMAL128", "INT"], "MathFunctions::truncate_decimal128"],
[10130, "round", "BIGINT", ["DOUBLE"], "MathFunctions::round"],
[10131, "dround", "BIGINT", ["DOUBLE"], "MathFunctions::round"],
[10132, "round", "DOUBLE", ["DOUBLE", "INT"], "MathFunctions::round_up_to"],
[10133, "dround", "DOUBLE", ["DOUBLE", "INT"], "MathFunctions::round_up_to"],
[10134, "truncate", "DOUBLE", ["DOUBLE", "INT"], "MathFunctions::truncate"],
[10140, "ln", "DOUBLE", ["DOUBLE"], "MathFunctions::ln"],
[10141, "dlog1", "DOUBLE", ["DOUBLE"], "MathFunctions::ln"],
[10142, "log", "DOUBLE", ["DOUBLE"], "MathFunctions::ln"],
[10150, "log", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::log"],
[10160, "log2", "DOUBLE", ["DOUBLE"], "MathFunctions::log2"],
[10170, "log10", "DOUBLE", ["DOUBLE"], "MathFunctions::log10"],
[10171, "dlog10", "DOUBLE", ["DOUBLE"], "MathFunctions::log10"],
[10180, "exp", "DOUBLE", ["DOUBLE"], "MathFunctions::exp"],
[10181, "dexp", "DOUBLE", ["DOUBLE"], "MathFunctions::exp"],
[10190, "radians", "DOUBLE", ["DOUBLE"], "MathFunctions::radians"],
[10200, "degrees", "DOUBLE", ["DOUBLE"], "MathFunctions::degrees"],
[10210, "sqrt", "DOUBLE", ["DOUBLE"], "MathFunctions::sqrt"],
[10211, "dsqrt", "DOUBLE", ["DOUBLE"], "MathFunctions::sqrt"],
[10220, "pow", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::pow"],
[10221, "power", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::pow"],
[10222, "dpow", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::pow"],
[10223, "fpow", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::pow"],
[10224, "atan2", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::atan2"],
[10225, "cot", "DOUBLE", ["DOUBLE"], "MathFunctions::cot"],
[10230, "pmod", "BIGINT", ["BIGINT", "BIGINT"], "MathFunctions::pmod<TYPE_BIGINT>"],
[10231, "pmod", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::pmod<TYPE_DOUBLE>"],
[10240, "fmod", "FLOAT", ["FLOAT", "FLOAT"], "MathFunctions::fmod<TYPE_FLOAT>"],
[10241, "fmod", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::fmod<TYPE_DOUBLE>"],
[10250, "mod", "TINYINT", ["TINYINT", "TINYINT"], "MathFunctions::mod<TYPE_TINYINT>"],
[10251, "mod", "SMALLINT", ["SMALLINT", "SMALLINT"], "MathFunctions::mod<TYPE_SMALLINT>"],
[10252, "mod", "INT", ["INT", "INT"], "MathFunctions::mod<TYPE_INT>"],
[10253, "mod", "BIGINT", ["BIGINT", "BIGINT"], "MathFunctions::mod<TYPE_BIGINT>"],
[10254, "mod", "LARGEINT", ["LARGEINT", "LARGEINT"], "MathFunctions::mod<TYPE_LARGEINT>"],
[10255, "mod", "FLOAT", ["FLOAT", "FLOAT"], "MathFunctions::fmod<TYPE_FLOAT>"],
[10256, "mod", "DOUBLE", ["DOUBLE", "DOUBLE"], "MathFunctions::fmod<TYPE_DOUBLE>"],
[10257, "mod", "DECIMALV2", ["DECIMALV2", "DECIMALV2"], "MathFunctions::mod<TYPE_DECIMALV2>"],
[102570, "mod", "DECIMAL32", ["DECIMAL32", "DECIMAL32"], "MathFunctions::mod<TYPE_DECIMAL32>"],
[102571, "mod", "DECIMAL64", ["DECIMAL64", "DECIMAL64"], "MathFunctions::mod<TYPE_DECIMAL64>"],
[102572, "mod", "DECIMAL128", ["DECIMAL128", "DECIMAL128"], "MathFunctions::mod<TYPE_DECIMAL128>"],
[10260, "positive", "DOUBLE", ["DOUBLE"], "MathFunctions::positive<TYPE_DOUBLE>"],
[10261, "positive", "BIGINT", ["BIGINT"], "MathFunctions::positive<TYPE_BIGINT>"],
[10262, "positive", "DECIMALV2", ["DECIMALV2"], "MathFunctions::positive<TYPE_DECIMALV2>"],
[102620, "positive", "DECIMAL32", ["DECIMAL32"], "MathFunctions::positive<TYPE_DECIMAL32>"],
[102621, "positive", "DECIMAL64", ["DECIMAL64"], "MathFunctions::positive<TYPE_DECIMAL64>"],
[102622, "positive", "DECIMAL128", ["DECIMAL128"], "MathFunctions::positive<TYPE_DECIMAL128>"],
[10270, "negative", "DOUBLE", ["DOUBLE"], "MathFunctions::negative<TYPE_DOUBLE>"],
[10271, "negative", "BIGINT", ["BIGINT"], "MathFunctions::negative<TYPE_BIGINT>"],
[10272, "negative", "DECIMALV2", ["DECIMALV2"], "MathFunctions::negative<TYPE_DECIMALV2>"],
[102720, "negative", "DECIMAL32", ["DECIMAL32"], "MathFunctions::negative<TYPE_DECIMAL32>"],
[102721, "negative", "DECIMAL64", ["DECIMAL64"], "MathFunctions::negative<TYPE_DECIMAL64>"],
[102722, "negative", "DECIMAL128", ["DECIMAL128"], "MathFunctions::negative<TYPE_DECIMAL128>"],
[10280, "least", "TINYINT", ["TINYINT", "..."], "MathFunctions::least<TYPE_TINYINT>"],
[10281, "least", "SMALLINT", ["SMALLINT", "..."], "MathFunctions::least<TYPE_SMALLINT>"],
[10282, "least", "INT", ["INT", "..."], "MathFunctions::least<TYPE_INT>"],
[10283, "least", "BIGINT", ["BIGINT", "..."], "MathFunctions::least<TYPE_BIGINT>"],
[10284, "least", "LARGEINT", ["LARGEINT", "..."], "MathFunctions::least<TYPE_LARGEINT>"],
[10285, "least", "FLOAT", ["FLOAT", "..."], "MathFunctions::least<TYPE_FLOAT>"],
[10286, "least", "DOUBLE", ["DOUBLE", "..."], "MathFunctions::least<TYPE_DOUBLE>"],
[10287, "least", "DECIMALV2", ["DECIMALV2", "..."], "MathFunctions::least<TYPE_DECIMALV2>"],
[102870, "least", "DECIMAL32", ["DECIMAL32", "..."], "MathFunctions::least<TYPE_DECIMAL32>"],
[102871, "least", "DECIMAL64", ["DECIMAL64", "..."], "MathFunctions::least<TYPE_DECIMAL64>"],
[102872, "least", "DECIMAL128", ["DECIMAL128", "..."], "MathFunctions::least<TYPE_DECIMAL128>"],
[10288, "least", "DATETIME", ["DATETIME", "..."], "MathFunctions::least<TYPE_DATETIME>"],
[10289, "least", "VARCHAR", ["VARCHAR", "..."], "MathFunctions::least<TYPE_VARCHAR>"],
[10290, "greatest", "TINYINT", ["TINYINT", "..."], "MathFunctions::greatest<TYPE_TINYINT>"],
[10291, "greatest", "SMALLINT", ["SMALLINT", "..."], "MathFunctions::greatest<TYPE_SMALLINT>"],
[10292, "greatest", "INT", ["INT", "..."], "MathFunctions::greatest<TYPE_INT>"],
[10293, "greatest", "BIGINT", ["BIGINT", "..."], "MathFunctions::greatest<TYPE_BIGINT>"],
[10294, "greatest", "LARGEINT", ["LARGEINT", "..."], "MathFunctions::greatest<TYPE_LARGEINT>"],
[10295, "greatest", "FLOAT", ["FLOAT", "..."], "MathFunctions::greatest<TYPE_FLOAT>"],
[10296, "greatest", "DOUBLE", ["DOUBLE", "..."], "MathFunctions::greatest<TYPE_DOUBLE>"],
[10297, "greatest", "DECIMALV2", ["DECIMALV2", "..."], "MathFunctions::greatest<TYPE_DECIMALV2>"],
[102970, "greatest", "DECIMAL32", ["DECIMAL32", "..."], "MathFunctions::greatest<TYPE_DECIMAL32>"],
[102971, "greatest", "DECIMAL64", ["DECIMAL64", "..."], "MathFunctions::greatest<TYPE_DECIMAL64>"],
[102972, "greatest", "DECIMAL128", ["DECIMAL128", "..."], "MathFunctions::greatest<TYPE_DECIMAL128>"],
[10298, "greatest", "DATETIME", ["DATETIME", "..."], "MathFunctions::greatest<TYPE_DATETIME>"],
[10299, "greatest", "VARCHAR", ["VARCHAR", "..."], "MathFunctions::greatest<TYPE_VARCHAR>"],
[10300, "rand", "DOUBLE", [], "MathFunctions::rand", "MathFunctions::rand_prepare", "MathFunctions::rand_close"],
[10301, "random", "DOUBLE", [], "MathFunctions::rand", "MathFunctions::rand_prepare", "MathFunctions::rand_close"],
[10302, "rand", "DOUBLE", ["BIGINT"], "MathFunctions::rand_seed", "MathFunctions::rand_prepare",
"MathFunctions::rand_close"],
[10303, "random", "DOUBLE", ["BIGINT"], "MathFunctions::rand_seed", "MathFunctions::rand_prepare",
"MathFunctions::rand_close"],
[10311, "bin", "VARCHAR", ['BIGINT'], "MathFunctions::bin"],
[10312, "hex", "VARCHAR", ['BIGINT'], "StringFunctions::hex_int"],
[10313, "hex", "VARCHAR", ['VARCHAR'], "StringFunctions::hex_string"],
[10314, "unhex", "VARCHAR", ['VARCHAR'], "StringFunctions::unhex"],
[10315, "sm3", "VARCHAR", ['VARCHAR'], "StringFunctions::sm3"],
[10320, "conv", "VARCHAR", ["BIGINT", "TINYINT", "TINYINT"], "MathFunctions::conv_int"],
[10321, "conv", "VARCHAR", ["VARCHAR", "TINYINT", "TINYINT"], "MathFunctions::conv_string"],
[10322, "square", "DOUBLE", ["DOUBLE"], "MathFunctions::square"],
# 20xxx: bit functions
[20010, 'bitand', 'TINYINT', ['TINYINT', 'TINYINT'], "BitFunctions::bitAnd<TYPE_TINYINT>"],
[20011, 'bitand', 'SMALLINT', ['SMALLINT', 'SMALLINT'], "BitFunctions::bitAnd<TYPE_SMALLINT>"],
[20012, 'bitand', 'INT', ['INT', 'INT'], "BitFunctions::bitAnd<TYPE_INT>"],
[20013, 'bitand', 'BIGINT', ['BIGINT', 'BIGINT'], "BitFunctions::bitAnd<TYPE_BIGINT>"],
[20014, 'bitand', 'LARGEINT', ['LARGEINT', 'LARGEINT'], "BitFunctions::bitAnd<TYPE_LARGEINT>"],
[20020, 'bitor', 'TINYINT', ['TINYINT', 'TINYINT'], "BitFunctions::bitOr<TYPE_TINYINT>"],
[20021, 'bitor', 'SMALLINT', ['SMALLINT', 'SMALLINT'], "BitFunctions::bitOr<TYPE_SMALLINT>"],
[20022, 'bitor', 'INT', ['INT', 'INT'], "BitFunctions::bitOr<TYPE_INT>"],
[20023, 'bitor', 'BIGINT', ['BIGINT', 'BIGINT'], "BitFunctions::bitOr<TYPE_BIGINT>"],
[20024, 'bitor', 'LARGEINT', ['LARGEINT', 'LARGEINT'], "BitFunctions::bitOr<TYPE_LARGEINT>"],
[20030, 'bitxor', 'TINYINT', ['TINYINT', 'TINYINT'], "BitFunctions::bitXor<TYPE_TINYINT>"],
[20031, 'bitxor', 'SMALLINT', ['SMALLINT', 'SMALLINT'], "BitFunctions::bitXor<TYPE_SMALLINT>"],
[20032, 'bitxor', 'INT', ['INT', 'INT'], "BitFunctions::bitXor<TYPE_INT>"],
[20033, 'bitxor', 'BIGINT', ['BIGINT', 'BIGINT'], "BitFunctions::bitXor<TYPE_BIGINT>"],
[20034, 'bitxor', 'LARGEINT', ['LARGEINT', 'LARGEINT'], "BitFunctions::bitXor<TYPE_LARGEINT>"],
[20040, 'bitnot', 'TINYINT', ['TINYINT'], "BitFunctions::bitNot<TYPE_TINYINT>"],
[20041, 'bitnot', 'SMALLINT', ['SMALLINT'], "BitFunctions::bitNot<TYPE_SMALLINT>"],
[20042, 'bitnot', 'INT', ['INT'], "BitFunctions::bitNot<TYPE_INT>"],
[20043, 'bitnot', 'BIGINT', ['BIGINT'], "BitFunctions::bitNot<TYPE_BIGINT>"],
[20044, 'bitnot', 'LARGEINT', ['LARGEINT'], "BitFunctions::bitNot<TYPE_LARGEINT>"],
# 30xxx: string functions
[30010, 'substr', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::substring', 'StringFunctions::sub_str_prepare', 'StringFunctions::sub_str_close'],
[30011, 'substr', 'VARCHAR', ['VARCHAR', 'INT', 'INT'], 'StringFunctions::substring', 'StringFunctions::sub_str_prepare', 'StringFunctions::sub_str_close'],
[30012, 'substring', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::substring', 'StringFunctions::sub_str_prepare', 'StringFunctions::sub_str_close'],
[30013, 'substring', 'VARCHAR', ['VARCHAR', 'INT', 'INT'], 'StringFunctions::substring', 'StringFunctions::sub_str_prepare', 'StringFunctions::sub_str_close'],
[30020, 'left', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::left', 'StringFunctions::left_or_right_prepare', 'StringFunctions::left_or_right_close'],
[30021, 'strleft', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::left', 'StringFunctions::left_or_right_prepare', 'StringFunctions::left_or_right_close'],
[30030, 'right', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::right', 'StringFunctions::left_or_right_prepare', 'StringFunctions::left_or_right_close'],
[30031, 'strright', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::right', 'StringFunctions::left_or_right_prepare', 'StringFunctions::left_or_right_close'],
[30040, 'ends_with', 'BOOLEAN', ['VARCHAR', 'VARCHAR'], 'StringFunctions::ends_with'],
[30050, 'starts_with', 'BOOLEAN', ['VARCHAR', 'VARCHAR'], 'StringFunctions::starts_with'],
[30060, 'null_or_empty', 'BOOLEAN', ['VARCHAR'], 'StringFunctions::null_or_empty'],
[30070, 'space', 'VARCHAR', ['INT'], 'StringFunctions::space'],
[30080, 'repeat', 'VARCHAR', ['VARCHAR', 'INT'], 'StringFunctions::repeat'],
[30090, 'lpad', 'VARCHAR', ['VARCHAR', 'INT', 'VARCHAR'], 'StringFunctions::lpad', 'StringFunctions::pad_prepare', 'StringFunctions::pad_close'],
[30100, 'rpad', 'VARCHAR', ['VARCHAR', 'INT', 'VARCHAR'], 'StringFunctions::rpad', 'StringFunctions::pad_prepare', 'StringFunctions::pad_close'],
[30110, 'append_trailing_char_if_absent', 'VARCHAR', ['VARCHAR', 'VARCHAR'],
'StringFunctions::append_trailing_char_if_absent'],
[30120, 'length', 'INT', ['VARCHAR'], 'StringFunctions::length'],
[30130, 'char_length', 'INT', ['VARCHAR'], 'StringFunctions::utf8_length'],
[30131, 'character_length', 'INT', ['VARCHAR'], 'StringFunctions::utf8_length'],
[30140, 'lower', 'VARCHAR', ['VARCHAR'], 'StringFunctions::lower'],
[30141, 'lcase', 'VARCHAR', ['VARCHAR'], 'StringFunctions::lower'],
[30150, 'upper', 'VARCHAR', ['VARCHAR'], 'StringFunctions::upper'],
[30151, 'ucase', 'VARCHAR', ['VARCHAR'], 'StringFunctions::upper'],
[30160, 'reverse', 'VARCHAR', ['VARCHAR'], 'StringFunctions::reverse'],
[30170, 'trim', 'VARCHAR', ['VARCHAR'], 'StringFunctions::trim'],
[30180, 'ltrim', 'VARCHAR', ['VARCHAR'], 'StringFunctions::ltrim'],
[30190, 'rtrim', 'VARCHAR', ['VARCHAR'], 'StringFunctions::rtrim'],
[30200, 'ascii', 'INT', ['VARCHAR'], 'StringFunctions::ascii'],
[30500, 'char', 'VARCHAR', ['INT'], "StringFunctions::get_char"],
[30210, 'instr', 'INT', ['VARCHAR', 'VARCHAR'], 'StringFunctions::instr'],
[30220, 'locate', 'INT', ['VARCHAR', 'VARCHAR'], 'StringFunctions::locate'],
[30221, 'locate', 'INT', ['VARCHAR', 'VARCHAR', 'INT'], 'StringFunctions::locate_pos'],
[30250, 'concat', 'VARCHAR', ['VARCHAR', '...'], 'StringFunctions::concat', 'StringFunctions::concat_prepare', 'StringFunctions::concat_close'],
[30260, 'concat_ws', 'VARCHAR', ['VARCHAR', 'VARCHAR', '...'], 'StringFunctions::concat_ws'],
[30270, 'find_in_set', 'INT', ['VARCHAR', 'VARCHAR'], 'StringFunctions::find_in_set'],
[30310, 'split_part', 'VARCHAR', ['VARCHAR', 'VARCHAR', 'INT'], 'StringFunctions::split_part'],
[30311, 'split', 'ARRAY_VARCHAR', ['VARCHAR', 'VARCHAR'], 'StringFunctions::split', 'StringFunctions::split_prepare', 'StringFunctions::split_close'],
[30320, 'regexp_extract', 'VARCHAR', ['VARCHAR', 'VARCHAR', 'BIGINT'], 'StringFunctions::regexp_extract',
'StringFunctions::regexp_prepare', 'StringFunctions::regexp_close'],
[30330, 'regexp_replace', 'VARCHAR', ['VARCHAR', 'VARCHAR', 'VARCHAR'], 'StringFunctions::regexp_replace',
'StringFunctions::regexp_prepare', 'StringFunctions::regexp_close'],
[30331, 'replace', 'VARCHAR', ['VARCHAR', 'VARCHAR', 'VARCHAR'], 'StringFunctions::regexp_replace',
'StringFunctions::regexp_prepare', 'StringFunctions::regexp_close'],
[30400, "money_format", "VARCHAR", ["BIGINT"], "StringFunctions::money_format_bigint"],
[30401, "money_format", "VARCHAR", ["LARGEINT"], "StringFunctions::money_format_largeint"],
[30402, "money_format", "VARCHAR", ["DECIMALV2"], "StringFunctions::money_format_decimalv2val"],
[304020, "money_format", "VARCHAR", ["DECIMAL32"], "StringFunctions::money_format_decimal<TYPE_DECIMAL32>"],
[304021, "money_format", "VARCHAR", ["DECIMAL64"], "StringFunctions::money_format_decimal<TYPE_DECIMAL64>"],
[304022, "money_format", "VARCHAR", ["DECIMAL128"], "StringFunctions::money_format_decimal<TYPE_DECIMAL128>"],
[30403, "money_format", "VARCHAR", ["DOUBLE"], "StringFunctions::money_format_double"],
[30410, 'parse_url', 'VARCHAR', ['VARCHAR', 'VARCHAR'], 'StringFunctions::parse_url',
'StringFunctions::parse_url_prepare', 'StringFunctions::parse_url_close'],
# 50xxx: timestamp functions
[50009, 'year', 'SMALLINT', ['DATETIME'], 'TimeFunctions::yearV2'],
[50010, 'year', 'INT', ['DATETIME'], 'TimeFunctions::year'],
[50019, 'month', 'TINYINT', ['DATETIME'], 'TimeFunctions::monthV2'],
[50020, 'month', 'INT', ['DATETIME'], 'TimeFunctions::month'],
[50030, 'quarter', 'INT', ['DATETIME'], 'TimeFunctions::quarter'],
[50040, 'dayofweek', 'INT', ['DATETIME'], 'TimeFunctions::day_of_week'],
[50050, 'to_date', 'DATE', ['DATETIME'], 'TimeFunctions::to_date'],
[50051, 'date', 'DATE', ['DATETIME'], 'TimeFunctions::to_date'],
[50058, 'day', 'TINYINT', ['DATETIME'], 'TimeFunctions::dayV2'],
[50059, 'dayofmonth', 'TINYINT', ['DATETIME'], 'TimeFunctions::dayV2'],
[50060, 'dayofmonth', 'INT', ['DATETIME'], 'TimeFunctions::day'],
[50061, 'day', 'INT', ['DATETIME'], 'TimeFunctions::day'],
[50062, 'dayofyear', 'INT', ['DATETIME'], 'TimeFunctions::day_of_year'],
[50063, 'weekofyear', 'INT', ['DATETIME'], 'TimeFunctions::week_of_year'],
[50069, 'hour', 'TINYINT', ['DATETIME'], 'TimeFunctions::hourV2'],
[50070, 'hour', 'INT', ['DATETIME'], 'TimeFunctions::hour'],
[50079, 'minute', 'TINYINT', ['DATETIME'], 'TimeFunctions::minuteV2'],
[50080, 'minute', 'INT', ['DATETIME'], 'TimeFunctions::minute'],
[50089, 'second', 'TINYINT', ['DATETIME'], 'TimeFunctions::secondV2'],
[50090, 'second', 'INT', ['DATETIME'], 'TimeFunctions::second'],
[50110, 'years_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::years_add'],
[50111, 'years_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::years_sub'],
[50120, 'months_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::months_add'],
[50121, 'months_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::months_sub'],
[50122, 'add_months', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::months_add'],
[50130, 'weeks_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::weeks_add'],
[50131, 'weeks_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::weeks_sub'],
[50140, 'days_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::days_add'],
[50141, 'days_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::days_sub'],
[50142, 'date_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::days_add'],
[50143, 'date_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::days_sub'],
[50144, 'adddate', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::days_add'],
[50145, 'subdate', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::days_sub'],
[50150, 'hours_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::hours_add'],
[50151, 'hours_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::hours_sub'],
[50160, 'minutes_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::minutes_add'],
[50161, 'minutes_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::minutes_sub'],
[50170, 'seconds_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::seconds_add'],
[50171, 'seconds_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::seconds_sub'],
[50180, 'microseconds_add', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::micros_add'],
[50181, 'microseconds_sub', 'DATETIME', ['DATETIME', 'INT'], 'TimeFunctions::micros_sub'],
[50190, 'years_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::years_diff'],
[50191, 'months_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::months_diff'],
[50192, 'weeks_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::weeks_diff'],
[50193, 'days_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::days_diff'],
[50194, 'hours_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::hours_diff'],
[50195, 'minutes_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::minutes_diff'],
[50196, 'seconds_diff', 'BIGINT', ['DATETIME', 'DATETIME'], 'TimeFunctions::seconds_diff'],
[50197, 'datediff', 'INT', ['DATETIME', 'DATETIME'], 'TimeFunctions::date_diff'],
[50198, 'timediff', 'TIME', ['DATETIME', 'DATETIME'], 'TimeFunctions::time_diff'],
[50200, 'now', 'DATETIME', [], 'TimeFunctions::now'],
[50201, 'current_timestamp', 'DATETIME', [], 'TimeFunctions::now'],
[50202, 'localtime', 'DATETIME', [], 'TimeFunctions::now'],
[50203, 'localtimestamp', 'DATETIME', [], 'TimeFunctions::now'],
[50210, 'curtime', 'TIME', [], 'TimeFunctions::curtime'],
[50211, 'current_time', 'TIME', [], 'TimeFunctions::curtime'],
[50220, 'curdate', 'DATE', [], 'TimeFunctions::curdate'],
[50221, 'current_date', 'DATE', [], 'TimeFunctions::curdate'],
[50230, 'from_days', 'DATE', ['INT'], 'TimeFunctions::from_days'],
[50231, 'to_days', 'INT', ['DATE'], 'TimeFunctions::to_days'],
[50240, 'str_to_date', 'DATETIME', ['VARCHAR', 'VARCHAR'], 'TimeFunctions::str_to_date', 'TimeFunctions::str_to_date_prepare', 'TimeFunctions::str_to_date_close'],
[50241, 'date_format', 'VARCHAR', ['DATETIME', 'VARCHAR'], 'TimeFunctions::datetime_format',
'TimeFunctions::format_prepare', 'TimeFunctions::format_close'],
[50242, 'date_format', 'VARCHAR', ['DATE', 'VARCHAR'], 'TimeFunctions::date_format',
'TimeFunctions::format_prepare', 'TimeFunctions::format_close'],
# cast string to date; this function is invoked by the FE via getStrToDateFunction and is invisible to users
[50243, 'str2date', 'DATE', ['VARCHAR', 'VARCHAR'], 'TimeFunctions::str2date', 'TimeFunctions::str_to_date_prepare', 'TimeFunctions::str_to_date_close'],
[50250, 'time_to_sec', 'BIGINT', ['TIME'], 'TimeFunctions::time_to_sec'],
[50300, 'unix_timestamp', 'INT', [], 'TimeFunctions::to_unix_for_now'],
[50301, 'unix_timestamp', 'INT', ['DATETIME'], 'TimeFunctions::to_unix_from_datetime'],
[50302, 'unix_timestamp', 'INT', ['DATE'], 'TimeFunctions::to_unix_from_date'],
[50303, 'unix_timestamp', 'INT', ['VARCHAR', 'VARCHAR'], 'TimeFunctions::to_unix_from_datetime_with_format'],
[50304, 'from_unixtime', 'VARCHAR', ['INT'], 'TimeFunctions::from_unix_to_datetime'],
[50305, 'from_unixtime', 'VARCHAR', ['INT', 'VARCHAR'], 'TimeFunctions::from_unix_to_datetime_with_format', 'TimeFunctions::from_unix_prepare', 'TimeFunctions::from_unix_close'],
[50310, 'dayname', 'VARCHAR', ['DATETIME'], 'TimeFunctions::day_name'],
[50311, 'monthname', 'VARCHAR', ['DATETIME'], 'TimeFunctions::month_name'],
[50320, 'convert_tz', 'DATETIME', ['DATETIME', 'VARCHAR', 'VARCHAR'], 'TimeFunctions::convert_tz', 'TimeFunctions::convert_tz_prepare', 'TimeFunctions::convert_tz_close'],
[50330, 'utc_timestamp', 'DATETIME', [], 'TimeFunctions::utc_timestamp'],
[50340, 'date_trunc', 'DATETIME', ['VARCHAR', 'DATETIME'], 'TimeFunctions::datetime_trunc', 'TimeFunctions::datetime_trunc_prepare', 'TimeFunctions::datetime_trunc_close'],
[50350, 'date_trunc', 'DATE', ['VARCHAR', 'DATE'], 'TimeFunctions::date_trunc', 'TimeFunctions::date_trunc_prepare', 'TimeFunctions::date_trunc_close'],
[50360, 'timestamp', 'DATETIME', ['DATETIME'], 'TimeFunctions::timestamp'],
# 60xxx: like predicate
# important ref: LikePredicate.java; the name must stay equal to LikePredicate.Operator
[60010, 'LIKE', 'BOOLEAN', ['VARCHAR', 'VARCHAR'], 'LikePredicate::like', 'LikePredicate::like_prepare',
'LikePredicate::like_close'],
[60020, 'REGEXP', 'BOOLEAN', ['VARCHAR', 'VARCHAR'], 'LikePredicate::regex', 'LikePredicate::regex_prepare',
'LikePredicate::regex_close'],
# 70xxx: condition functions
# In fact, condition functions are implemented with condition expressions, not with functions;
# only a function name is registered here for the FE.
# Why expressions rather than functions? One reason is performance: function_call_expr has to
# evaluate all of its child expressions ahead of time, whereas a condition expression can skip
# the remaining children once the condition is already known to be true.
# (A minimal sketch of this short-circuit behaviour follows right after the list below.)
[70100, 'if', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN', 'BOOLEAN'], 'nullptr'],
[70101, 'if', 'TINYINT', ['BOOLEAN', 'TINYINT', 'TINYINT'], 'nullptr'],
[70102, 'if', 'SMALLINT', ['BOOLEAN', 'SMALLINT', 'SMALLINT'], 'nullptr'],
[70103, 'if', 'INT', ['BOOLEAN', 'INT', 'INT'], 'nullptr'],
[70104, 'if', 'BIGINT', ['BOOLEAN', 'BIGINT', 'BIGINT'], 'nullptr'],
[70105, 'if', 'LARGEINT', ['BOOLEAN', 'LARGEINT', 'LARGEINT'], 'nullptr'],
[70106, 'if', 'FLOAT', ['BOOLEAN', 'FLOAT', 'FLOAT'], 'nullptr'],
[70107, 'if', 'DOUBLE', ['BOOLEAN', 'DOUBLE', 'DOUBLE'], 'nullptr'],
[70108, 'if', 'DATETIME', ['BOOLEAN', 'DATETIME', 'DATETIME'], 'nullptr'],
[70109, 'if', 'DATE', ['BOOLEAN', 'DATE', 'DATE'], 'nullptr'],
[70110, 'if', 'DECIMALV2', ['BOOLEAN', 'DECIMALV2', 'DECIMALV2'], 'nullptr'],
[701100, 'if', 'DECIMAL32', ['BOOLEAN', 'DECIMAL32', 'DECIMAL32'], 'nullptr'],
[701101, 'if', 'DECIMAL64', ['BOOLEAN', 'DECIMAL64', 'DECIMAL64'], 'nullptr'],
[701102, 'if', 'DECIMAL128', ['BOOLEAN', 'DECIMAL128', 'DECIMAL128'], 'nullptr'],
[70111, 'if', 'VARCHAR', ['BOOLEAN', 'VARCHAR', 'VARCHAR'], 'nullptr'],
[70112, 'if', 'BITMAP', ['BOOLEAN', 'BITMAP', 'BITMAP'], 'nullptr'],
[70113, 'if', 'PERCENTILE', ['BOOLEAN', 'PERCENTILE', 'PERCENTILE'], 'nullptr'],
[70114, 'if', 'HLL', ['BOOLEAN', 'HLL', 'HLL'], 'nullptr'],
[70115, 'if', 'TIME', ['BOOLEAN', 'TIME', 'TIME'], 'nullptr'],
[70200, 'ifnull', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], 'nullptr'],
[70201, 'ifnull', 'TINYINT', ['TINYINT', 'TINYINT'], 'nullptr'],
[70202, 'ifnull', 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'nullptr'],
[70203, 'ifnull', 'INT', ['INT', 'INT'], 'nullptr'],
[70204, 'ifnull', 'BIGINT', ['BIGINT', 'BIGINT'], 'nullptr'],
[70205, 'ifnull', 'LARGEINT', ['LARGEINT', 'LARGEINT'], 'nullptr'],
[70206, 'ifnull', 'FLOAT', ['FLOAT', 'FLOAT'], 'nullptr'],
[70207, 'ifnull', 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'nullptr'],
[70208, 'ifnull', 'DATE', ['DATE', 'DATE'], 'nullptr'],
[70209, 'ifnull', 'DATETIME', ['DATETIME', 'DATETIME'], 'nullptr'],
[70210, 'ifnull', 'DECIMALV2', ['DECIMALV2', 'DECIMALV2'], 'nullptr'],
[702100, 'ifnull', 'DECIMAL32', ['DECIMAL32', 'DECIMAL32'], 'nullptr'],
[702101, 'ifnull', 'DECIMAL64', ['DECIMAL64', 'DECIMAL64'], 'nullptr'],
[702102, 'ifnull', 'DECIMAL128', ['DECIMAL128', 'DECIMAL128'], 'nullptr'],
[70211, 'ifnull', 'VARCHAR', ['VARCHAR', 'VARCHAR'], 'nullptr'],
[70212, 'ifnull', 'BITMAP', ['BITMAP', 'BITMAP'], 'nullptr'],
[70213, 'ifnull', 'PERCENTILE', ['PERCENTILE', 'PERCENTILE'], 'nullptr'],
[70214, 'ifnull', 'HLL', ['HLL', 'HLL'], 'nullptr'],
[70215, 'ifnull', 'TIME', ['TIME', 'TIME'], 'nullptr'],
[70300, 'nullif', 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], 'nullptr'],
[70301, 'nullif', 'TINYINT', ['TINYINT', 'TINYINT'], 'nullptr'],
[70302, 'nullif', 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'nullptr'],
[70303, 'nullif', 'INT', ['INT', 'INT'], 'nullptr'],
[70304, 'nullif', 'BIGINT', ['BIGINT', 'BIGINT'], 'nullptr'],
[70305, 'nullif', 'LARGEINT', ['LARGEINT', 'LARGEINT'], 'nullptr'],
[70306, 'nullif', 'FLOAT', ['FLOAT', 'FLOAT'], 'nullptr'],
[70307, 'nullif', 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'nullptr'],
[70308, 'nullif', 'DATE', ['DATE', 'DATE'], 'nullptr'],
[70309, 'nullif', 'DATETIME', ['DATETIME', 'DATETIME'], 'nullptr'],
[70310, 'nullif', 'DECIMALV2', ['DECIMALV2', 'DECIMALV2'], 'nullptr'],
[703100, 'nullif', 'DECIMAL32', ['DECIMAL32', 'DECIMAL32'], 'nullptr'],
[703101, 'nullif', 'DECIMAL64', ['DECIMAL64', 'DECIMAL64'], 'nullptr'],
[703102, 'nullif', 'DECIMAL128', ['DECIMAL128', 'DECIMAL128'], 'nullptr'],
[70311, 'nullif', 'VARCHAR', ['VARCHAR', 'VARCHAR'], 'nullptr'],
[70312, 'nullif', 'BITMAP', ['BITMAP', 'BITMAP'], 'nullptr'],
[70313, 'nullif', 'PERCENTILE', ['PERCENTILE', 'PERCENTILE'], 'nullptr'],
[70314, 'nullif', 'HLL', ['HLL', 'HLL'], 'nullptr'],
[70315, 'nullif', 'TIME', ['TIME', 'TIME'], 'nullptr'],
[70400, 'coalesce', 'BOOLEAN', ['BOOLEAN', '...'], 'nullptr'],
[70401, 'coalesce', 'TINYINT', ['TINYINT', '...'], 'nullptr'],
[70402, 'coalesce', 'SMALLINT', ['SMALLINT', '...'], 'nullptr'],
[70403, 'coalesce', 'INT', ['INT', '...'], 'nullptr'],
[70404, 'coalesce', 'BIGINT', ['BIGINT', '...'], 'nullptr'],
[70405, 'coalesce', 'LARGEINT', ['LARGEINT', '...'], 'nullptr'],
[70406, 'coalesce', 'FLOAT', ['FLOAT', '...'], 'nullptr'],
[70407, 'coalesce', 'DOUBLE', ['DOUBLE', '...'], 'nullptr'],
[70408, 'coalesce', 'DATETIME', ['DATETIME', '...'], 'nullptr'],
[70409, 'coalesce', 'DATE', ['DATE', '...'], 'nullptr'],
[70410, 'coalesce', 'DECIMALV2', ['DECIMALV2', '...'], 'nullptr'],
[704100, 'coalesce', 'DECIMAL32', ['DECIMAL32', '...'], 'nullptr'],
[704101, 'coalesce', 'DECIMAL64', ['DECIMAL64', '...'], 'nullptr'],
[704102, 'coalesce', 'DECIMAL128', ['DECIMAL128', '...'], 'nullptr'],
[70411, 'coalesce', 'VARCHAR', ['VARCHAR', '...'], 'nullptr'],
[70412, 'coalesce', 'BITMAP', ['BITMAP', '...'], 'nullptr'],
[70413, 'coalesce', 'PERCENTILE', ['PERCENTILE', '...'], 'nullptr'],
[70414, 'coalesce', 'HLL', ['HLL', '...'], 'nullptr'],
[70416, 'coalesce', 'TIME', ['TIME', '...'], 'nullptr'],
[70415, 'esquery', 'BOOLEAN', ['VARCHAR', 'VARCHAR'], 'ESFunctions::match'],
# hyperloglog function
[80010, 'hll_cardinality', 'BIGINT', ['HLL'], 'HyperloglogFunction::hll_cardinality'],
[80011, 'hll_cardinality', 'BIGINT', ['VARCHAR'], 'HyperloglogFunction::hll_cardinality_from_string'],
[80020, 'hll_hash', 'HLL', ['VARCHAR'], 'HyperloglogFunction::hll_hash'],
[80030, 'hll_empty', 'HLL', [], 'HyperloglogFunction::hll_empty'],
# bitmap function
[90010, 'to_bitmap', 'BITMAP', ['VARCHAR'], 'BitmapFunctions::to_bitmap'],
[90020, 'bitmap_hash', 'BITMAP', ['VARCHAR'], 'BitmapFunctions::bitmap_hash'],
[90030, 'bitmap_count', 'BIGINT', ['BITMAP'], 'BitmapFunctions::bitmap_count'],
[90040, 'bitmap_empty', 'BITMAP', [], 'BitmapFunctions::bitmap_empty'],
[90050, 'bitmap_or', 'BITMAP', ['BITMAP', 'BITMAP'], 'BitmapFunctions::bitmap_or'],
[90060, 'bitmap_and', 'BITMAP', ['BITMAP', 'BITMAP'], 'BitmapFunctions::bitmap_and'],
[90070, 'bitmap_to_string', 'VARCHAR', ['BITMAP'], 'BitmapFunctions::bitmap_to_string'],
[90080, 'bitmap_from_string', 'BITMAP', ['VARCHAR'], 'BitmapFunctions::bitmap_from_string'],
[90090, 'bitmap_contains', 'BOOLEAN', ['BITMAP', 'BIGINT'], 'BitmapFunctions::bitmap_contains'],
[90100, 'bitmap_has_any', 'BOOLEAN', ['BITMAP', 'BITMAP'], 'BitmapFunctions::bitmap_has_any'],
[90200, 'bitmap_andnot', 'BITMAP', ['BITMAP', 'BITMAP'], 'BitmapFunctions::bitmap_andnot'],
[90300, 'bitmap_xor', 'BITMAP', ['BITMAP', 'BITMAP'], 'BitmapFunctions::bitmap_xor'],
[90400, 'bitmap_remove', 'BITMAP', ['BITMAP', 'BIGINT'], 'BitmapFunctions::bitmap_remove'],
[90500, 'bitmap_to_array', 'ARRAY_BIGINT', ['BITMAP'], 'BitmapFunctions::bitmap_to_array'],
[90600, 'bitmap_max', 'BIGINT', ['BITMAP'], 'BitmapFunctions::bitmap_max'],
[90700, 'bitmap_min', 'BIGINT', ['BITMAP'], 'BitmapFunctions::bitmap_min'],
# hash function
[100010, 'murmur_hash3_32', 'INT', ['VARCHAR', '...'], 'HashFunctions::murmur_hash3_32'],
# Utility functions
[100011, 'sleep', 'BOOLEAN', ['INT'], "UtilityFunctions::sleep"],
[100012, 'version', 'VARCHAR', [], "UtilityFunctions::version"],
[100013, 'current_version', 'VARCHAR', [], "UtilityFunctions::current_version"],
[100014, 'last_query_id', 'VARCHAR', [], "UtilityFunctions::last_query_id"],
[100015, 'uuid', 'VARCHAR', [], "UtilityFunctions::uuid"],
# json string function
[110000, "get_json_int", "INT", ["VARCHAR", "VARCHAR"], "JsonFunctions::get_json_int",
"JsonFunctions::json_path_prepare", "JsonFunctions::json_path_close"],
[110001, "get_json_double", "DOUBLE", ["VARCHAR", "VARCHAR"], "JsonFunctions::get_json_double",
"JsonFunctions::json_path_prepare", "JsonFunctions::json_path_close"],
[110002, "get_json_string", "VARCHAR", ["VARCHAR", "VARCHAR"], "JsonFunctions::get_json_string",
"JsonFunctions::json_path_prepare", "JsonFunctions::json_path_close"],
# json type function
[110003, "parse_json", "JSON", ["VARCHAR"], "JsonFunctions::parse_json"],
[110004, "json_string", "VARCHAR", ["JSON"], "JsonFunctions::json_string"],
[110005, "json_query", "JSON", ["JSON", "VARCHAR"], "JsonFunctions::json_query",
"JsonFunctions::native_json_path_prepare", "JsonFunctions::native_json_path_close"],
# [110006, "json_value", "JSON", ["JSON", "VARCHAR"], "JsonFunctions::json_query"],
[110007, "json_exists", "BOOLEAN", ["JSON", "VARCHAR"], "JsonFunctions::json_exists",
"JsonFunctions::native_json_path_prepare", "JsonFunctions::native_json_path_close"],
[110008, "json_object", "JSON", ["JSON", "..."], "JsonFunctions::json_object"],
[110009, "json_array", "JSON", ["JSON", "..."], "JsonFunctions::json_array"],
[110010, "json_object", "JSON", [], "JsonFunctions::json_object_empty"],
[110011, "json_array", "JSON", [], "JsonFunctions::json_array_empty"],
# aes and base64 function
[120100, "aes_encrypt", "VARCHAR", ["VARCHAR", "VARCHAR"], "EncryptionFunctions::aes_encrypt"],
[120110, "aes_decrypt", "VARCHAR", ["VARCHAR", "VARCHAR"], "EncryptionFunctions::aes_decrypt"],
[120120, "from_base64", "VARCHAR", ["VARCHAR"], "EncryptionFunctions::from_base64"],
[120130, "to_base64", "VARCHAR", ["VARCHAR"], "EncryptionFunctions::to_base64"],
[120140, "md5", "VARCHAR", ["VARCHAR"], "EncryptionFunctions::md5"],
[120150, "md5sum", "VARCHAR", ["VARCHAR", "..."], "EncryptionFunctions::md5sum"],
[120160, "sha2", "VARCHAR", ["VARCHAR", "INT"], "EncryptionFunctions::sha2", "EncryptionFunctions::sha2_prepare", "EncryptionFunctions::sha2_close"],
# geo function
[120000, "ST_Point", "VARCHAR", ["DOUBLE", "DOUBLE"], "GeoFunctions::st_point"],
[120001, "ST_X", "DOUBLE", ["VARCHAR"], "GeoFunctions::st_x"],
[120002, "ST_Y", "DOUBLE", ["VARCHAR"], "GeoFunctions::st_y"],
[120003, "ST_Distance_Sphere", "DOUBLE", ["DOUBLE", "DOUBLE", "DOUBLE", "DOUBLE"], "GeoFunctions::st_distance_sphere"],
[120004, "ST_AsText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_as_wkt"],
[120005, "ST_AsWKT", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_as_wkt"],
[120006, "ST_GeometryFromText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_from_wkt", "GeoFunctions::st_from_wkt_prepare", "GeoFunctions::st_from_wkt_close"],
[120007, "ST_GeomFromText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_from_wkt", "GeoFunctions::st_from_wkt_prepare", "GeoFunctions::st_from_wkt_close"],
[120008, "ST_LineFromText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_line", "GeoFunctions::st_line_prepare", "GeoFunctions::st_from_wkt_close"],
[120009, "ST_LineStringFromText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_line", "GeoFunctions::st_line_prepare", "GeoFunctions::st_from_wkt_close"],
[120010, "ST_Polygon", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_polygon", "GeoFunctions::st_polygon_prepare", "GeoFunctions::st_from_wkt_close"],
[120011, "ST_PolyFromText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_polygon", "GeoFunctions::st_polygon_prepare", "GeoFunctions::st_from_wkt_close"],
[120012, "ST_PolygonFromText", "VARCHAR", ["VARCHAR"], "GeoFunctions::st_polygon", "GeoFunctions::st_polygon_prepare", "GeoFunctions::st_from_wkt_close"],
[120013, "ST_Circle", "VARCHAR", ["DOUBLE", "DOUBLE", "DOUBLE"], "GeoFunctions::st_circle", "GeoFunctions::st_circle_prepare", "GeoFunctions::st_from_wkt_close"],
[120014, "ST_Contains", "BOOLEAN", ["VARCHAR", "VARCHAR"], "GeoFunctions::st_contains", "GeoFunctions::st_contains_prepare", "GeoFunctions::st_contains_close"],
# percentile function
[130000, 'percentile_hash', 'PERCENTILE', ['DOUBLE'], 'PercentileFunctions::percentile_hash'],
[130001, 'percentile_empty', 'PERCENTILE', [], 'PercentileFunctions::percentile_empty'],
[130002, 'percentile_approx_raw', 'DOUBLE', ['PERCENTILE', 'DOUBLE'], 'PercentileFunctions::percentile_approx_raw'],
[140000, 'grouping_id', 'BIGINT', ['BIGINT'], 'GroupingSetsFunctions::grouping_id'],
[140001, 'grouping', 'BIGINT', ['BIGINT'], 'GroupingSetsFunctions::grouping'],
[150000, 'array_length', 'INT', ['ANY_ARRAY'], 'ArrayFunctions::array_length'],
[150001, 'array_append', 'ANY_ARRAY', ['ANY_ARRAY', 'ANY_ELEMENT'], 'ArrayFunctions::array_append'],
[150002, 'array_contains', 'BOOLEAN', ['ANY_ARRAY', 'ANY_ELEMENT'], 'ArrayFunctions::array_contains'],
#sum
[150003, 'array_sum', 'BIGINT', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_sum_boolean'],
[150004, 'array_sum', 'BIGINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_sum_tinyint'],
[150005, 'array_sum', 'BIGINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_sum_smallint'],
[150006, 'array_sum', 'BIGINT', ['ARRAY_INT'], 'ArrayFunctions::array_sum_int'],
[150007, 'array_sum', 'BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_sum_bigint'],
[150008, 'array_sum', 'LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_sum_largeint'],
[150009, 'array_sum', 'DOUBLE', ['ARRAY_FLOAT'], 'ArrayFunctions::array_sum_float'],
[150010, 'array_sum', 'DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_sum_double'],
[150011, 'array_sum', 'DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_sum_decimalv2'],
#[150012, 'array_sum', 'DECIMAL64', ['ARRAY_DECIMAL32'], 'ArrayFunctions::array_sum'],
#[150013, 'array_sum', 'DECIMAL64', ['ARRAY_DECIMAL64'], 'ArrayFunctions::array_sum'],
#[150014, 'array_sum', 'DECIMAL128', ['ARRAY_DECIMAL128'], 'ArrayFunctions::array_sum'],
#avg
[150023, 'array_avg', 'DOUBLE', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_avg_boolean'],
[150024, 'array_avg', 'DOUBLE', ['ARRAY_TINYINT'], 'ArrayFunctions::array_avg_tinyint'],
[150025, 'array_avg', 'DOUBLE', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_avg_smallint'],
[150026, 'array_avg', 'DOUBLE', ['ARRAY_INT'], 'ArrayFunctions::array_avg_int'],
[150027, 'array_avg', 'DOUBLE', ['ARRAY_BIGINT'], 'ArrayFunctions::array_avg_bigint'],
[150028, 'array_avg', 'DOUBLE', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_avg_largeint'],
[150029, 'array_avg', 'DOUBLE', ['ARRAY_FLOAT'], 'ArrayFunctions::array_avg_float'],
[150030, 'array_avg', 'DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_avg_double'],
[150031, 'array_avg', 'DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_avg_decimalv2'],
#[150032, 'array_avg', 'DATE', ['ARRAY_DATE'], 'ArrayFunctions::array_avg_date'],
#[150033, 'array_avg', 'DATETIME', ['ARRAY_DATETIME'], 'ArrayFunctions::array_avg_datetime'],
#[150012, 'array_avg', 'DECIMAL64', ['ARRAY_DECIMAL32'], 'ArrayFunctions::array_avg'],
#[150013, 'array_avg', 'DECIMAL64', ['ARRAY_DECIMAL64'], 'ArrayFunctions::array_avg'],
#[150014, 'array_avg', 'DECIMAL128', ['ARRAY_DECIMAL128'], 'ArrayFunctions::array_avg'],
#min
[150043, 'array_min', 'BOOLEAN', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_min_boolean'],
[150044, 'array_min', 'TINYINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_min_tinyint'],
[150045, 'array_min', 'SMALLINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_min_smallint'],
[150046, 'array_min', 'INT', ['ARRAY_INT'], 'ArrayFunctions::array_min_int'],
[150047, 'array_min', 'BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_min_bigint'],
[150048, 'array_min', 'LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_min_largeint'],
[150049, 'array_min', 'FLOAT', ['ARRAY_FLOAT'], 'ArrayFunctions::array_min_float'],
[150050, 'array_min', 'DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_min_double'],
[150051, 'array_min', 'DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_min_decimalv2'],
[150052, 'array_min', 'DATE', ['ARRAY_DATE'], 'ArrayFunctions::array_min_date'],
[150053, 'array_min', 'DATETIME', ['ARRAY_DATETIME'], 'ArrayFunctions::array_min_datetime'],
[150054, 'array_min', 'VARCHAR', ['ARRAY_VARCHAR'], 'ArrayFunctions::array_min_varchar'],
#[150012, 'array_min', 'DECIMAL64', ['ARRAY_DECIMAL32'], 'ArrayFunctions::array_min'],
#[150013, 'array_min', 'DECIMAL64', ['ARRAY_DECIMAL64'], 'ArrayFunctions::array_min'],
#[150014, 'array_min', 'DECIMAL128', ['ARRAY_DECIMAL128'], 'ArrayFunctions::array_min'],
#max
[150063, 'array_max', 'BOOLEAN', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_max_boolean'],
[150064, 'array_max', 'TINYINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_max_tinyint'],
[150065, 'array_max', 'SMALLINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_max_smallint'],
[150066, 'array_max', 'INT', ['ARRAY_INT'], 'ArrayFunctions::array_max_int'],
[150067, 'array_max', 'BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_max_bigint'],
[150068, 'array_max', 'LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_max_largeint'],
[150069, 'array_max', 'FLOAT', ['ARRAY_FLOAT'], 'ArrayFunctions::array_max_float'],
[150070, 'array_max', 'DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_max_double'],
[150071, 'array_max', 'DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_max_decimalv2'],
[150072, 'array_max', 'DATE', ['ARRAY_DATE'], 'ArrayFunctions::array_max_date'],
[150073, 'array_max', 'DATETIME', ['ARRAY_DATETIME'], 'ArrayFunctions::array_max_datetime'],
[150074, 'array_max', 'VARCHAR', ['ARRAY_VARCHAR'], 'ArrayFunctions::array_max_varchar'],
#[150012, 'array_max', 'DECIMAL64', ['ARRAY_DECIMAL32'], 'ArrayFunctions::array_max'],
#[150013, 'array_max', 'DECIMAL64', ['ARRAY_DECIMAL64'], 'ArrayFunctions::array_max'],
#[150014, 'array_max', 'DECIMAL128', ['ARRAY_DECIMAL128'], 'ArrayFunctions::array_max'],
[150083, 'array_remove', 'ANY_ARRAY', ['ANY_ARRAY', 'ANY_ELEMENT'], 'ArrayFunctions::array_remove'],
[150084, 'array_position', 'INT', ['ANY_ARRAY', 'ANY_ELEMENT'], 'ArrayFunctions::array_position'],
[150090, 'array_distinct', 'ARRAY_BOOLEAN', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_distinct_boolean'],
[150091, 'array_distinct', 'ARRAY_TINYINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_distinct_tinyint'],
[150092, 'array_distinct', 'ARRAY_SMALLINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_distinct_smallint'],
[150093, 'array_distinct', 'ARRAY_INT', ['ARRAY_INT'], 'ArrayFunctions::array_distinct_int'],
[150094, 'array_distinct', 'ARRAY_BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_distinct_bigint'],
[150095, 'array_distinct', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_distinct_largeint'],
[150096, 'array_distinct', 'ARRAY_FLOAT', ['ARRAY_FLOAT'], 'ArrayFunctions::array_distinct_float'],
[150097, 'array_distinct', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_distinct_double'],
[150098, 'array_distinct', 'ARRAY_VARCHAR', ['ARRAY_VARCHAR'], 'ArrayFunctions::array_distinct_varchar'],
[150099, 'array_distinct', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_distinct_decimalv2'],
[150100, 'array_distinct', 'ARRAY_DATETIME', ['ARRAY_DATETIME'], 'ArrayFunctions::array_distinct_datetime'],
[150101, 'array_distinct', 'ARRAY_DATE', ['ARRAY_DATE'], 'ArrayFunctions::array_distinct_date'],
[150110, 'array_sort', 'ARRAY_BOOLEAN', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_sort_boolean'],
[150111, 'array_sort', 'ARRAY_TINYINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_sort_tinyint'],
[150112, 'array_sort', 'ARRAY_SMALLINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_sort_smallint'],
[150113, 'array_sort', 'ARRAY_INT', ['ARRAY_INT'], 'ArrayFunctions::array_sort_int'],
[150114, 'array_sort', 'ARRAY_BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_sort_bigint'],
[150115, 'array_sort', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_sort_largeint'],
[150116, 'array_sort', 'ARRAY_FLOAT', ['ARRAY_FLOAT'], 'ArrayFunctions::array_sort_float'],
[150117, 'array_sort', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_sort_double'],
[150118, 'array_sort', 'ARRAY_VARCHAR', ['ARRAY_VARCHAR'], 'ArrayFunctions::array_sort_varchar'],
[150119, 'array_sort', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_sort_decimalv2'],
[150120, 'array_sort', 'ARRAY_DATETIME', ['ARRAY_DATETIME'], 'ArrayFunctions::array_sort_datetime'],
[150121, 'array_sort', 'ARRAY_DATE', ['ARRAY_DATE'], 'ArrayFunctions::array_sort_date'],
[150130, 'reverse', 'ARRAY_BOOLEAN', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_reverse_boolean'],
[150131, 'reverse', 'ARRAY_TINYINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_reverse_tinyint'],
[150132, 'reverse', 'ARRAY_SMALLINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_reverse_smallint'],
[150133, 'reverse', 'ARRAY_INT', ['ARRAY_INT'], 'ArrayFunctions::array_reverse_int'],
[150134, 'reverse', 'ARRAY_BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_reverse_bigint'],
[150135, 'reverse', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_reverse_largeint'],
[150136, 'reverse', 'ARRAY_FLOAT', ['ARRAY_FLOAT'], 'ArrayFunctions::array_reverse_float'],
[150137, 'reverse', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_reverse_double'],
[150138, 'reverse', 'ARRAY_VARCHAR', ['ARRAY_VARCHAR'], 'ArrayFunctions::array_reverse_varchar'],
[150139, 'reverse', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_reverse_decimalv2'],
[150140, 'reverse', 'ARRAY_DATETIME', ['ARRAY_DATETIME'], 'ArrayFunctions::array_reverse_datetime'],
[150141, 'reverse', 'ARRAY_DATE', ['ARRAY_DATE'], 'ArrayFunctions::array_reverse_date'],
[150150, 'array_join', 'VARCHAR', ['ARRAY_VARCHAR', 'VARCHAR'], 'ArrayFunctions::array_join_varchar'],
[150151, 'array_join', 'VARCHAR', ['ARRAY_VARCHAR', 'VARCHAR', 'VARCHAR'], 'ArrayFunctions::array_join_varchar'],
[150160, 'array_difference', 'ARRAY_BIGINT', ['ARRAY_BOOLEAN'], 'ArrayFunctions::array_difference_boolean'],
[150161, 'array_difference', 'ARRAY_BIGINT', ['ARRAY_TINYINT'], 'ArrayFunctions::array_difference_tinyint'],
[150162, 'array_difference', 'ARRAY_BIGINT', ['ARRAY_SMALLINT'], 'ArrayFunctions::array_difference_smallint'],
[150163, 'array_difference', 'ARRAY_BIGINT', ['ARRAY_INT'], 'ArrayFunctions::array_difference_int'],
[150164, 'array_difference', 'ARRAY_BIGINT', ['ARRAY_BIGINT'], 'ArrayFunctions::array_difference_bigint'],
[150165, 'array_difference', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT'], 'ArrayFunctions::array_difference_largeint'],
[150166, 'array_difference', 'ARRAY_DOUBLE', ['ARRAY_FLOAT'], 'ArrayFunctions::array_difference_float'],
[150167, 'array_difference', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE'], 'ArrayFunctions::array_difference_double'],
[150168, 'array_difference', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2'], 'ArrayFunctions::array_difference_decimalv2'],
[150170, 'array_slice', 'ARRAY_DATE', ['ARRAY_DATE', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_date'],
[150171, 'array_slice', 'ARRAY_DATETIME', ['ARRAY_DATETIME', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_datetime'],
[150172, 'array_slice', 'ARRAY_BOOLEAN', ['ARRAY_BOOLEAN', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_boolean'],
[150173, 'array_slice', 'ARRAY_TINYINT', ['ARRAY_TINYINT', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_tinyint'],
[150174, 'array_slice', 'ARRAY_SMALLINT', ['ARRAY_SMALLINT', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_smallint'],
[150175, 'array_slice', 'ARRAY_INT', ['ARRAY_INT', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_int'],
[150176, 'array_slice', 'ARRAY_BIGINT', ['ARRAY_BIGINT', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_bigint'],
[150177, 'array_slice', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_largeint'],
[150178, 'array_slice', 'ARRAY_FLOAT', ['ARRAY_FLOAT', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_float'],
[150179, 'array_slice', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_double'],
[150180, 'array_slice', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_decimalv2'],
[150181, 'array_slice', 'ARRAY_VARCHAR', ['ARRAY_VARCHAR', 'BIGINT', 'BIGINT'], 'ArrayFunctions::array_slice_varchar'],
[150190, 'array_concat', 'ARRAY_DATE', ['ARRAY_DATE', "..."], 'ArrayFunctions::array_concat_date'],
[150191, 'array_concat', 'ARRAY_DATETIME', ['ARRAY_DATETIME', "..."], 'ArrayFunctions::array_concat_datetime'],
[150192, 'array_concat', 'ARRAY_BOOLEAN', ['ARRAY_BOOLEAN', "..."], 'ArrayFunctions::array_concat_boolean'],
[150193, 'array_concat', 'ARRAY_TINYINT', ['ARRAY_TINYINT', "..."], 'ArrayFunctions::array_concat_tinyint'],
[150194, 'array_concat', 'ARRAY_SMALLINT', ['ARRAY_SMALLINT', "..."], 'ArrayFunctions::array_concat_smallint'],
[150195, 'array_concat', 'ARRAY_INT', ['ARRAY_INT', "..."], 'ArrayFunctions::array_concat_int'],
[150196, 'array_concat', 'ARRAY_BIGINT', ['ARRAY_BIGINT', "..."], 'ArrayFunctions::array_concat_bigint'],
[150197, 'array_concat', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT', "..."], 'ArrayFunctions::array_concat_largeint'],
[150198, 'array_concat', 'ARRAY_FLOAT', ['ARRAY_FLOAT', "..."], 'ArrayFunctions::array_concat_float'],
[150199, 'array_concat', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE', "..."], 'ArrayFunctions::array_concat_double'],
[150200, 'array_concat', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2', "..."], 'ArrayFunctions::array_concat_decimalv2'],
[150201, 'array_concat', 'ARRAY_VARCHAR', ['ARRAY_VARCHAR', "..."], 'ArrayFunctions::array_concat_varchar'],
[150210, 'array_overlap', 'BOOLEAN', ['ARRAY_DATE', 'ARRAY_DATE'], 'ArrayFunctions::array_overlap_date'],
[150211, 'array_overlap', 'BOOLEAN', ['ARRAY_DATETIME', 'ARRAY_DATETIME'], 'ArrayFunctions::array_overlap_datetime'],
[150212, 'array_overlap', 'BOOLEAN', ['ARRAY_BOOLEAN', 'ARRAY_BOOLEAN'], 'ArrayFunctions::array_overlap_boolean'],
[150213, 'array_overlap', 'BOOLEAN', ['ARRAY_TINYINT', 'ARRAY_TINYINT'], 'ArrayFunctions::array_overlap_tinyint'],
[150214, 'array_overlap', 'BOOLEAN', ['ARRAY_SMALLINT', 'ARRAY_SMALLINT'], 'ArrayFunctions::array_overlap_smallint'],
[150215, 'array_overlap', 'BOOLEAN', ['ARRAY_INT', 'ARRAY_INT'], 'ArrayFunctions::array_overlap_int'],
[150216, 'array_overlap', 'BOOLEAN', ['ARRAY_BIGINT', 'ARRAY_BIGINT'], 'ArrayFunctions::array_overlap_bigint'],
[150217, 'array_overlap', 'BOOLEAN', ['ARRAY_LARGEINT', 'ARRAY_LARGEINT'], 'ArrayFunctions::array_overlap_largeint'],
[150218, 'array_overlap', 'BOOLEAN', ['ARRAY_FLOAT', 'ARRAY_FLOAT'], 'ArrayFunctions::array_overlap_float'],
[150219, 'array_overlap', 'BOOLEAN', ['ARRAY_DOUBLE', 'ARRAY_DOUBLE'], 'ArrayFunctions::array_overlap_double'],
[150220, 'array_overlap', 'BOOLEAN', ['ARRAY_DECIMALV2', 'ARRAY_DECIMALV2'], 'ArrayFunctions::array_overlap_decimalv2'],
[150221, 'array_overlap', 'BOOLEAN', ['ARRAY_VARCHAR', 'ARRAY_VARCHAR'], 'ArrayFunctions::array_overlap_varchar'],
[150230, 'array_intersect', 'ARRAY_DATE', ['ARRAY_DATE', "..."], 'ArrayFunctions::array_intersect_date'],
[150231, 'array_intersect', 'ARRAY_DATETIME', ['ARRAY_DATETIME', "..."], 'ArrayFunctions::array_intersect_datetime'],
[150232, 'array_intersect', 'ARRAY_BOOLEAN', ['ARRAY_BOOLEAN', "..."], 'ArrayFunctions::array_intersect_boolean'],
[150233, 'array_intersect', 'ARRAY_TINYINT', ['ARRAY_TINYINT', "..."], 'ArrayFunctions::array_intersect_tinyint'],
[150234, 'array_intersect', 'ARRAY_SMALLINT', ['ARRAY_SMALLINT', "..."], 'ArrayFunctions::array_intersect_smallint'],
[150235, 'array_intersect', 'ARRAY_INT', ['ARRAY_INT', "..."], 'ArrayFunctions::array_intersect_int'],
[150236, 'array_intersect', 'ARRAY_BIGINT', ['ARRAY_BIGINT', "..."], 'ArrayFunctions::array_intersect_bigint'],
[150237, 'array_intersect', 'ARRAY_LARGEINT', ['ARRAY_LARGEINT', "..."], 'ArrayFunctions::array_intersect_largeint'],
[150238, 'array_intersect', 'ARRAY_FLOAT', ['ARRAY_FLOAT', "..."], 'ArrayFunctions::array_intersect_float'],
[150239, 'array_intersect', 'ARRAY_DOUBLE', ['ARRAY_DOUBLE', "..."], 'ArrayFunctions::array_intersect_double'],
[150240, 'array_intersect', 'ARRAY_DECIMALV2', ['ARRAY_DECIMALV2', "..."], 'ArrayFunctions::array_intersect_decimalv2'],
[150241, 'array_intersect', 'ARRAY_VARCHAR', ['ARRAY_VARCHAR', "..."], 'ArrayFunctions::array_intersect_varchar'],
]
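As a side note on the 70xxx condition functions above: the registry only records a name for the FE, and the comment argues that expressions can short-circuit while a function call would evaluate every child first. A minimal, self-contained Python sketch of that difference (plain callables stand in for the engine's child expressions, which are not part of this file) might look like this:

def eager_if(cond_expr, then_expr, else_expr):
    # Function-call style: every child expression is evaluated up front.
    cond, then_val, else_val = cond_expr(), then_expr(), else_expr()
    return then_val if cond else else_val

def lazy_if(cond_expr, then_expr, else_expr):
    # Expression style: only the branch selected by the condition runs.
    return then_expr() if cond_expr() else else_expr()

calls = []

def child(tag, value):
    # Returns a child "expression" that records whether it was evaluated.
    def run():
        calls.append(tag)
        return value
    return run

eager_if(child("cond", True), child("then", 1), child("else", 2))
lazy_if(child("cond", True), child("then", 1), child("else", 2))
print(calls)  # ['cond', 'then', 'else', 'cond', 'then'] -- the lazy form never touches 'else'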
| 77.223315
| 182
| 0.652165
|
8924b1fecbeeee766a9c091702e1d158b0881832
| 707
|
py
|
Python
|
python--learnings/coding-practice/hourglass_sums.py
|
jekhokie/scriptbox
|
93c03d8ab9b7e7cd9c5c6a65b444392ffe92fd70
|
[
"MIT"
] | 11
|
2020-03-29T09:12:25.000Z
|
2022-03-24T01:01:50.000Z
|
python--learnings/coding-practice/hourglass_sums.py
|
jekhokie/scriptbox
|
93c03d8ab9b7e7cd9c5c6a65b444392ffe92fd70
|
[
"MIT"
] | 5
|
2021-06-02T03:41:51.000Z
|
2022-02-26T03:48:50.000Z
|
python--learnings/coding-practice/hourglass_sums.py
|
jekhokie/scriptbox
|
93c03d8ab9b7e7cd9c5c6a65b444392ffe92fd70
|
[
"MIT"
] | 8
|
2019-02-01T13:33:14.000Z
|
2021-12-14T20:16:03.000Z
|
#!/usr/bin/env python
#
# Given a 6x6 array, find the maximum hourglass sum, where an hourglass is:
# X X X
# X
# X X X
#
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
def hourglassSum(arr):
sum_vals = []
for x in range(1, 5):
for y in range(1, 5):
vals = [arr[x-1][y-1], arr[x-1][y], arr[x-1][y+1], arr[x][y], arr[x+1][y-1], arr[x+1][y], arr[x+1][y+1]]
sum_vals.append(sum(vals))
return max(sum_vals)
if __name__ == '__main__':
arr = []
for _ in range(6):
arr.append(list(map(int, input().rstrip().split())))
result = hourglassSum(arr)
print(result)
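A quick self-check for the script above, assuming hourglassSum is in scope (for example, this snippet is appended to the same module in place of the stdin-driven __main__ block); the grid and the expected answer of 19 are the well-known sample for this exercise:

# Self-check for hourglassSum; assumes the function defined above is in scope.
sample = [
    [1, 1, 1, 0, 0, 0],
    [0, 1, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0],
    [0, 0, 2, 4, 4, 0],
    [0, 0, 0, 2, 0, 0],
    [0, 0, 1, 2, 4, 0],
]
assert hourglassSum(sample) == 19  # best hourglass: 2+4+4 + 2 + 1+2+4
print("hourglassSum sample check passed")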
| 22.09375
| 116
| 0.570014
|
faea131b4a7856c0f22a87570d0fcac3693cba59
| 172
|
py
|
Python
|
cookiecutter/fileupload/admin.py
|
binygreenberg/cookiecutter
|
ed32c55505dc49faaf75cb6739c30e98f995969d
|
[
"MIT"
] | null | null | null |
cookiecutter/fileupload/admin.py
|
binygreenberg/cookiecutter
|
ed32c55505dc49faaf75cb6739c30e98f995969d
|
[
"MIT"
] | null | null | null |
cookiecutter/fileupload/admin.py
|
binygreenberg/cookiecutter
|
ed32c55505dc49faaf75cb6739c30e98f995969d
|
[
"MIT"
] | null | null | null |
from .models import Picture
from django.contrib import admin
@admin.register(Picture)
class MyPictureAdmin(admin.ModelAdmin):
list_display = ('profile','slug','file')
| 24.571429
| 44
| 0.767442
|
f299c576b66a3595cc5ef78f557f55a4349a8138
| 5,190
|
py
|
Python
|
uiautomation/pages/tmhomepage.py
|
fingerella2000/e2enuggets
|
8f2c6a88106985a3409bb032de1d2b9b7dcfb2fe
|
[
"MIT"
] | null | null | null |
uiautomation/pages/tmhomepage.py
|
fingerella2000/e2enuggets
|
8f2c6a88106985a3409bb032de1d2b9b7dcfb2fe
|
[
"MIT"
] | null | null | null |
uiautomation/pages/tmhomepage.py
|
fingerella2000/e2enuggets
|
8f2c6a88106985a3409bb032de1d2b9b7dcfb2fe
|
[
"MIT"
] | null | null | null |
from uiautomation.pages.basepage import BasePage
from uiautomation.common import Constants
from uiautomation.elements import BasePageElement
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.action_chains import ActionChains
import time
class Locators(object):
dictionary = {
# """tmall home page elements"""
"body":(By.CSS_SELECTOR,"html > body"),
"search_bar":(By.CSS_SELECTOR,"#mq"),
"search_button":(By.CSS_SELECTOR,"#mallSearch > form > fieldset > div > button"),
"top1_product":(By.CSS_SELECTOR,"#J_ItemList > div:nth-child(1)"),
"top2_product":(By.CSS_SELECTOR,"#J_ItemList > div:nth-child(2)"),
"top3_product":(By.CSS_SELECTOR,"#J_ItemList > div:nth-child(3)"),
"next_page":(By.CSS_SELECTOR,"#content > div > div.ui-page > div > b.ui-page-num > a.ui-page-next")
}
class SearchBarElement(BasePageElement):
locator = Locators.dictionary["search_bar"]
class SearchButtonElement(BasePageElement):
locator = Locators.dictionary["search_button"]
class NextPageElement(BasePageElement):
locator = Locators.dictionary["next_page"]
class Top1ProductElement(BasePageElement):
locator = Locators.dictionary["top1_product"]
class Top2ProductElement(BasePageElement):
locator = Locators.dictionary["top2_product"]
class Top3ProductElement(BasePageElement):
locator = Locators.dictionary["top3_product"]
class TMHomePage(BasePage):
search_bar_element = SearchBarElement()
search_button_element = SearchButtonElement()
top1_product_element = Top1ProductElement()
top2_product_element = Top2ProductElement()
top3_product_element = Top3ProductElement()
next_page_element = NextPageElement()
def search(self, keywords):
WebDriverWait(self.driver, Constants.WAIT_TIME_SHORT).until(EC.visibility_of_any_elements_located(Locators.dictionary["body"]))
self._scrollDownAndUp()
"""entering search keywords"""
_search_bar = self.search_bar_element
_keywords_chain_actions = ActionChains(self.driver)
_keywords_chain_actions.move_to_element(_search_bar)
_keywords_chain_actions.click(_search_bar)
for c in list(keywords):
_keywords_chain_actions.send_keys(c)
_keywords_chain_actions.perform()
"""click search button"""
self.driver.element = self.search_button_element
self.driver.element.click()
# self._scrollDownAndUp()
return keywords in self.driver.title
def viewTop3Products(self):
self._scrollDownAndUp()
_top1_product = self.top1_product_element
_top1_product_actions = ActionChains(self.driver)
_top1_product_actions.move_to_element(_top1_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
self._viewNewTabAndCloseAfter()
_top2_product = self.top2_product_element
_top2_product_actions = ActionChains(self.driver)
_top2_product_actions.move_to_element(_top2_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
self._viewNewTabAndCloseAfter()
_top3_product = self.top3_product_element
_top3_product_actions = ActionChains(self.driver)
_top3_product_actions.move_to_element(_top3_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
self._viewNewTabAndCloseAfter()
return True
def viewTopPages(self, number_of_pages):
for i in range(number_of_pages):
print("viewing page: " + str(i+1))
self.viewTop3Products()
if i+1 == number_of_pages:
continue
self.driver.element = self.next_page_element
self.driver.element.click()
self.driver.switch_to_default_content()
return True
def _viewNewTabAndCloseAfter(self):
self.driver.switch_to_window(self.driver.window_handles[-1])
self._scrollDownAndUp()
self.driver.close()
self.driver.switch_to_window(self.driver.window_handles[-1])
self.driver.switch_to_default_content()
def _scrollDownAndUp(self):
_scroll_step = Constants.SCROLL_STEP
_scroll_interval = Constants.SCROLL_INTERVAL
"""scroll down"""
_last_height = self.driver.execute_script("return document.body.scrollHeight")
for h in range(int(_last_height/_scroll_step)):
time.sleep(_scroll_interval)
self.driver.execute_script("window.scrollTo(0," + str(_scroll_step*(h+1)) + ");")
"""scroll up"""
_last_height = self.driver.execute_script("return document.body.scrollHeight")
for h in range(int(_last_height/_scroll_step)):
time.sleep(_scroll_interval)
self.driver.execute_script("window.scrollTo(0," + str(_last_height - _scroll_step*(h+1)) + ");")
self.driver.execute_script("window.scrollTo(0, 0);")
| 45.526316
| 135
| 0.705588
|
891f85e9ac5ee1f4f9889cab4c551b2b23103bd4
| 9,694
|
py
|
Python
|
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/msrplistenerdomains.py
|
kakkotetsu/IxNetwork
|
f9fb614b51bb8988af035967991ad36702933274
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/msrplistenerdomains.py
|
kakkotetsu/IxNetwork
|
f9fb614b51bb8988af035967991ad36702933274
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/msrplistenerdomains.py
|
kakkotetsu/IxNetwork
|
f9fb614b51bb8988af035967991ad36702933274
|
[
"MIT"
] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class MsrpListenerDomains(Base):
"""The MsrpListenerDomains class encapsulates a required msrpListenerDomains node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the MsrpListenerDomains property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'msrpListenerDomains'
def __init__(self, parent):
super(MsrpListenerDomains, self).__init__(parent)
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def SrClassIdType(self):
"""SR Class ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srClassIdType')
@property
def SrClassPriorityType(self):
"""SR Class Priority.Class A maps to priority 3 and Class B maps to priority 2.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srClassPriorityType')
@property
def SrClassVid(self):
"""VLAN ID that associated streams will be tagged with by Talker
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srClassVid')
def get_device_ids(self, PortNames=None, Active=None, SrClassIdType=None, SrClassPriorityType=None, SrClassVid=None):
"""Base class infrastructure that gets a list of msrpListenerDomains device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
SrClassIdType (str): optional regex of srClassIdType
SrClassPriorityType (str): optional regex of srClassPriorityType
SrClassVid (str): optional regex of srClassVid
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def FetchAndUpdateConfigFromCloud(self, Mode):
"""Executes the fetchAndUpdateConfigFromCloud operation on the server.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/globals?deepchild=*|/api/v1/sessions/1/ixnetwork/topology?deepchild=*)): The method internally sets Arg1 to the current href for this instance
Mode (str):
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('FetchAndUpdateConfigFromCloud', payload=locals(), response_object=None)
def Start(self):
"""Executes the start operation on the server.
Start MSRP Listener Domain
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
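# Note: the Start/Stop variants below reuse one method name, so at class-definition
# time only the last 'def Start'/'def Stop' stays bound; the earlier bodies document
# the alternative argument shapes accepted by the underlying server operation.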
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start MSRP Listener Domain
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start MSRP Listener Domain
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, Arg2):
"""Executes the start operation on the server.
Start
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
Returns:
list(str): ID to associate each async action invocation
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('Start', payload=locals(), response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stop MSRP Listener Domain
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop MSRP Listener Domain
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop MSRP Listener Domain
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, Arg2):
"""Executes the stop operation on the server.
Stop
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
Returns:
list(str): ID to associate each async action invocation
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('Stop', payload=locals(), response_object=None)
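# --- Illustrative usage sketch (assumed, not part of the generated class) ---
# The parent accessor chain below is a guess for illustration only; the exact path to
# an MSRP listener depends on how the NGPF topology was built in a given session.
#
# from ixnetwork_restpy.testplatform.testplatform import TestPlatform
#
# testplatform = TestPlatform('127.0.0.1', rest_port=11009)        # assumed host/port
# ixnetwork = testplatform.Sessions.add().Ixnetwork
# topology = ixnetwork.Topology.find()                             # first topology
# listener = topology.DeviceGroup.find().Ethernet.find().MsrpListener.find()  # assumed chain
# domains = listener.MsrpListenerDomains      # required node; one set of properties
# domains.Name = 'listener-domains-1'
# matching_ids = domains.get_device_ids(SrClassVid='^2$')          # regex filter
# listener.Start()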
| 36.037175
| 190
| 0.737466
|
1a3d58f84d4056a1a8d51ee22a6bc740357a6f05
| 5,082
|
py
|
Python
|
src/transformers/models/reformer/tokenization_reformer_fast.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 2
|
2020-11-28T12:51:07.000Z
|
2020-11-28T12:52:14.000Z
|
src/transformers/models/reformer/tokenization_reformer_fast.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 1
|
2020-11-12T11:00:12.000Z
|
2020-11-12T11:00:12.000Z
|
src/transformers/models/reformer/tokenization_reformer_fast.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 1
|
2020-11-17T02:48:00.000Z
|
2020-11-17T02:48:00.000Z
|
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_reformer import ReformerTokenizer
else:
ReformerTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to file names for serializing Tokenizer instances
####################################################
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to pretrained vocabulary URL for all the model ids.
####################################################
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/spiece.model"
},
"tokenizer_file": {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/tokenizer.json"
},
}
####################################################
# Mapping from model ids to max length of inputs
####################################################
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Reformer tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece
<https://github.com/google/sentencepiece>`__ .
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
slow_tokenizer_class = ReformerTokenizer
def __init__(
self,
vocab_file,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
additional_special_tokens=[],
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
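# --- Illustrative usage sketch (not part of the library module) ---
# A minimal sketch assuming the public checkpoint listed in PRETRAINED_VOCAB_FILES_MAP
# above is reachable; downloading it requires network access.
if __name__ == "__main__":
    tokenizer = ReformerTokenizerFast.from_pretrained("google/reformer-crime-and-punishment")
    encoded = tokenizer("Crime and Punishment")
    print(encoded["input_ids"][:10], encoded["attention_mask"][:10])
    # save_vocabulary() above expects an existing directory and copies spiece.model
    # into it (skipping the copy when source and target resolve to the same path).
    os.makedirs("./reformer-tokenizer", exist_ok=True)
    print(tokenizer.save_vocabulary("./reformer-tokenizer"))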
| 38.793893
| 128
| 0.654073
|
6ca5de09096f221f24fb3ffd45968b6d8837860a
| 2,850
|
py
|
Python
|
python/games/flappypixel.py
|
umbc-hackafe/sign-drivers
|
cd2a80a455a0956f23afb5dc15eb67f48c69f289
|
[
"MIT"
] | null | null | null |
python/games/flappypixel.py
|
umbc-hackafe/sign-drivers
|
cd2a80a455a0956f23afb5dc15eb67f48c69f289
|
[
"MIT"
] | 1
|
2015-08-17T16:28:45.000Z
|
2015-08-17T16:28:45.000Z
|
python/games/flappypixel.py
|
umbc-hackafe/sign-drivers
|
cd2a80a455a0956f23afb5dc15eb67f48c69f289
|
[
"MIT"
] | null | null | null |
import graphics
import driver
import game
import random
import string
class FlappyPixel(game.Game):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset()
def reset(self):
self.sprites.clear()
self.playing = True
self.flappy = graphics.Rectangle(1, 1, x=24, y=0, wrapx=False, wrapy=False)
self.sprites.add(self.flappy)
# XXX: hard-coded width
self.scoretext = graphics.TextSprite("0", width=4, height=4, x=1, y=0)
self.sprites.add(self.scoretext)
self.terrain = self.terrain_gen()
self.ticks = 0
self.score = 0
self.up = 0
def terrain_gen(self):
while True:
width, height = 2, random.randint(1, 10)
if random.randint(0, 1):
top = 0
else:
top = 15 - height
yield graphics.Rectangle(width, height, 113, top)
def check_collision(self):
for sprite in self.sprites:
if sprite is self.flappy:
continue
if sprite.x <= self.flappy.x <= (sprite.x+sprite.width) and sprite.y <= self.flappy.y <= (sprite.y+sprite.height):
return True
return False
def scroll_terrain(self):
for sprite in list(self.sprites):
if sprite is not self.flappy and sprite is not self.scoretext:
sprite.x -= 1
if sprite.x < -sprite.width:
self.sprites.remove(sprite)
def loop(self):
if self.playing:
if set(string.ascii_lowercase + ' ').intersection(self.keys) and not self.up:
print("AHH")
self.up = 3
if self.up and not self.ticks % 3:
self.up -= 1
self.flappy.y -= self.up
elif not self.ticks % 4:
self.flappy.y += 1
if self.flappy.y > 15 or self.flappy.y < 0 or self.check_collision():
self.sprites = set([graphics.TextSprite("GAME OVER: SCORE %d" %
self.score, width=5, height=7),
graphics.TextSprite("R TO RELOAD", width=5, height=7, y=8)])
self.playing = False
return
if not self.ticks % 3:
self.scroll_terrain()
if not self.ticks % 45:
self.sprites.add(next(self.terrain))
# Only add to the score if you're in the process of passing
# terrain.
if self.ticks > 45 * 5:
self.score += 1
self.scoretext.set_text(str(self.score))
self.ticks += 1
else:
if 'r' in self.keys:
self.reset()
super().loop()
GAME = FlappyPixel
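# --- Illustrative sketch of the collision test used in check_collision() above ---
# The flappy pixel is treated as a point and every other sprite as an axis-aligned
# bounding box, with inclusive comparisons on all four edges; the standalone helper
# below reproduces that test without the graphics/driver/game dependencies.
if __name__ == "__main__":
    def point_hits_box(px, py, bx, by, bw, bh):
        # Same inclusive test as check_collision(): touching an edge counts as a hit.
        return bx <= px <= bx + bw and by <= py <= by + bh

    # A terrain pipe 2 wide and 6 tall anchored at the top of the 16-row display.
    assert point_hits_box(24, 3, 24, 0, 2, 6)       # inside the pipe -> collision
    assert not point_hits_box(24, 7, 24, 0, 2, 6)   # just below it -> clear
    print("collision sketch OK")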
| 30.645161
| 126
| 0.512281
|