Dataset columns:
hexsha (string, length 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class)
max_stars_repo_path (string, length 4 to 209) | max_stars_repo_name (string, length 5 to 121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable)
max_issues_repo_path (string, length 4 to 209) | max_issues_repo_name (string, length 5 to 121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable)
max_forks_repo_path (string, length 4 to 209) | max_forks_repo_name (string, length 5 to 121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable)
content (string, length 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1)
hexsha: 48de78818b665dd3fbb2cb85b03940b713e2a9ee | size: 93 | ext: py | lang: Python
max_stars: aioazstorage/__init__.py @ rcarmo/aioazstorage (head cdd06f68fe43b4b9308330744b639dd38ddcedb1, licenses ["MIT"]) | count: 2 | events: 2019-07-09T13:54:59.000Z to 2021-01-23T15:25:27.000Z
max_issues: aioazstorage/__init__.py @ rcarmo/aioazstorage (head cdd06f68fe43b4b9308330744b639dd38ddcedb1, licenses ["MIT"]) | count: 1 | events: 2020-02-04T15:08:53.000Z to 2020-02-04T15:08:53.000Z
max_forks: aioazstorage/__init__.py @ rcarmo/aioazstorage (head cdd06f68fe43b4b9308330744b639dd38ddcedb1, licenses ["MIT"]) | count: 1 | events: 2021-01-23T15:25:29.000Z to 2021-01-23T15:25:29.000Z
content:
from .tables import TableClient
from .queues import QueueClient
from .blobs import BlobClient
avg_line_length: 31 | max_line_length: 31 | alphanum_fraction: 0.849462

hexsha: c21c4b43407b0a45354aff31c8ecbb99d35b30bd | size: 17,024 | ext: py | lang: Python
max_stars: encode_utils/MetaDataRegistration/eu_register.py @ StanfordBioinformatics/encode_utils (head f681f4d928d606d417557d6f3ed26f0affa45193, licenses ["MIT"]) | count: 3 | events: 2018-02-05T03:18:04.000Z to 2019-01-16T21:40:19.000Z
max_issues: encode_utils/MetaDataRegistration/eu_register.py @ StanfordBioinformatics/encode_utils (head f681f4d928d606d417557d6f3ed26f0affa45193, licenses ["MIT"]) | count: 28 | events: 2018-03-20T18:41:48.000Z to 2022-03-31T23:16:02.000Z
max_forks: encode_utils/MetaDataRegistration/eu_register.py @ StanfordBioinformatics/encode_utils (head f681f4d928d606d417557d6f3ed26f0affa45193, licenses ["MIT"]) | count: 12 | events: 2018-02-12T04:11:20.000Z to 2021-12-09T15:56:36.000Z
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###
# © 2018 The Board of Trustees of the Leland Stanford Junior University
# Nathaniel Watson
# nathankw@stanford.edu
###
"""
Given a tab-delimited or JSON input file containing one or more records belonging to one of the profiles
listed on the ENCODE Portal (such as https://www.encodeproject.org/profiles/biosample.json),
either POSTS or PATCHES the records. The default is to POST each record; to PATCH instead, see
the ``--patch`` option.
When POSTING file records, the md5sum of each file will be calculated for you if you haven't
already provided the `md5sum` property. Then, after the POST operation completes, the actual file
will be uploaded to AWS S3. In order for this to work, you must set the `submitted_file_name`
property to the full, local path to your file to upload. Alternatively, you can set
`submitted_file_name` to an existing S3 object, e.g. s3://mybucket/reads.fastq.
Note that there is a special 'trick' defined in the ``encode_utils.connection.Connection()``
class that can be taken advantage of to simplify submission under certain profiles.
It concerns the `attachment` property in any profile that employs it, such as the `document`
profile. The trick works as follows: instead of constructing the `attachment` property object
value as defined in the schema, simply use a single-key object of the following format::
{"path": "/path/to/myfile"}
and the `attachment` object will be constructed for you.
"""
import argparse
import json
import os
import re
import sys
import requests
import encode_utils.utils as euu
import encode_utils.connection as euc
from encode_utils.parent_argparser import dcc_login_parser
from encode_utils.profiles import Profiles
# Check that Python3 is being used
v = sys.version_info
if v < (3, 3):
raise Exception("Requires Python 3.3 or greater.")
#: RECORD_ID_FIELD is a special field that won't be skipped in the create_payload() function.
#: It is used when patching objects to indicate the identifier of the record to patch.
RECORD_ID_FIELD = "record_id"
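# For example (illustrative only), a PATCH payload parsed from the input file might be:
#     {"record_id": "ENCBS123ABC", "description": "updated description"}
# where 'record_id' names the existing Portal object and the remaining keys are patched.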
def get_parser():
parser = argparse.ArgumentParser(
description = __doc__,
parents=[dcc_login_parser],
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-d", "--dry-run", action="store_true", help="""
Set this option to enable the dry-run feature, such that no modifications are performed on the
ENCODE Portal. This is useful if you'd like to inspect the logs or ensure the validity of
your input file.""")
parser.add_argument("--no-aliases", action="store_true", help="""
Setting this option is NOT advised. Set this option for doing a POST when your input file
doesn't contain an 'aliases' column, even though this property is supported in the corresponding
ENCODE profile.
When POSTING a record to a profile that includes the 'aliases' property, this package requires
the 'aliases' property be used for traceability purposes and because without this property,
it'll be very easy to create duplicate objects on the Portal. For example, you can easily
create the same biosample as many times as you want on the Portal when not providing an alias.""")
parser.add_argument(
"--no-upload-file",
action="store_true",
help="Don't upload files when POSTing file objects",
)
parser.add_argument("-p", "--profile_id", required=True, help="""
The ID of the profile to submit to, i.e. use 'genetic_modification' for
https://www.encodeproject.org/profiles/genetic_modification.json. The profile will be pulled down for
type-checking in order to type-cast any values in the input file to the proper type (i.e. some
values need to be submitted as integers, not strings).""")
parser.add_argument("-i", "--infile", required=True, help="""
The JSON input file or tab-delimited input file.
**The tab-delimited file format:**
Must have a field-header line as the first line.
Any lines after the header line that start with a '#' will be skipped, as well as any empty lines.
The field names must be exactly equal to the corresponding property names in the corresponding
profile. Non-schematic fields are allowed as long as they begin with a '#'; they will be
skipped. If a property has an array data type (as indicated in the profile's documentation
on the Portal), the array literals '[' and ']' are optional. Values within the array must
be comma-delimited. For example, if a property takes an array of strings, then you can use
either of these as the value:
1) str1,str2,str3
2) [str1,str2,str3]
On the other hand, if a property takes a JSON object as a value, then the value you enter must be
valid JSON. This is true anytime you have to specify a JSON object. Thus, if you are submitting a
genetic_modification and you have two 'introduced_tags' to provide, you can supply them in either
of the following two ways:
1) {"name": "eGFP", "location": "C-terminal"},{"name": "FLAG","C-terminal"}
2) [{"name": "eGFP", "location": "C-terminal"},{"name": "FLAG","C-terminal"}]
**The JSON input file**
Can be a single JSON object, or an array of JSON objects. Key names must match property names of
an ENCODE record type (profile).
**The following applies to both input file formats**
When patching objects, you must specify the 'record_id' field to indicate the identifier of the record.
Note that this is a special field that is not present in the ENCODE schema, and doesn't use the '#'
prefix to mark it as non-schematic. Here you can specify any valid record identifier
(i.e. UUID, accession, alias).
Some profiles (most) require specification of the 'award' and 'lab' attributes. These may be set
as fields in the input file, or can be left out, in which case the default values for these
attributes will be pulled from the environment variables DCC_AWARD and DCC_LAB, respectively.
""")
parser.add_argument("-w", "--overwrite-array-values", action="store_true", help="""
Only has meaning in combination with the --patch option. When this is specified, it means that
any keys with array values will be overwritten on the ENCODE Portal with the corresponding value
to patch. The default action is to extend the array value with the patch value and then to remove
any duplicates.""")
parser.add_argument("-r", "--remove-property", help="""
Only has meaning in combination with the --rm-patch option. Properties specified in this argument
will be popped from the record fetched from the ENCODE Portal. Can be specified as a comma-delimited
string.""")
group = parser.add_mutually_exclusive_group()
group.add_argument("--patch", action="store_true", help="""
Presence of this option indicates to PATCH an existing DCC record rather than register a new one.""")
group.add_argument("--rm-patch", action="store_true", help="""
Presence of this option indicates to remove a property, as specified by the -r argument,
from an existing DCC record, and then PATCH it with the payload specified in -i.""")
return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.rm_patch and not args.remove_property:
parser.error("No properties to remove were specified. Use --patch if only patching is needed.")
if args.remove_property and not args.rm_patch:
parser.error("Properties to remove were specified, but --rm-patch flag was not set.")
profile_id = args.profile_id
dcc_mode = args.dcc_mode
dry_run = args.dry_run
no_aliases = args.no_aliases
overwrite_array_values = args.overwrite_array_values
if dcc_mode:
conn = euc.Connection(dcc_mode, dry_run)
else:
# Default dcc_mode taken from environment variable DCC_MODE.
conn = euc.Connection()
# Put conn into submit mode:
conn.set_submission(True)
schema = conn.profiles.get_profile_from_id(profile_id)
infile = args.infile
patch = args.patch
rmpatch = args.rm_patch
if args.remove_property is not None:
props_to_remove = args.remove_property.split(",")
gen = create_payloads(schema=schema, infile=infile)
for payload in gen:
if not patch and not rmpatch:
conn.post(
payload,
require_aliases=not no_aliases,
upload_file=not args.no_upload_file,
)
elif rmpatch:
record_id = payload.get(RECORD_ID_FIELD, False)
if not record_id:
raise ValueError(
"Can't patch payload {} since there isn't a '{}' field indicating an identifier for the record to be PATCHED.".format(
euu.print_format_dict(payload), RECORD_ID_FIELD))
payload.pop(RECORD_ID_FIELD)
payload.update({conn.ENCID_KEY: record_id})
conn.remove_and_patch(props=props_to_remove, patch=payload, extend_array_values=not overwrite_array_values)
elif patch:
record_id = payload.get(RECORD_ID_FIELD, False)
if not record_id:
raise ValueError(
"Can't patch payload {} since there isn't a '{}' field indicating an identifier for the record to be PATCHED.".format(
euu.print_format_dict(payload), RECORD_ID_FIELD))
payload.pop(RECORD_ID_FIELD)
payload.update({conn.ENCID_KEY: record_id})
conn.patch(payload=payload, extend_array_values=not overwrite_array_values)
def check_valid_json(prop, val, row_count):
"""
Runs json.loads(val) to ensure valid JSON.
Args:
val: str. The string to load as JSON.
prop: str. Name of the schema property/field that stores the passed in val.
row_count: int. The line number from the input file that is currently being processed.
Raises:
ValueError: The input is malformed JSON.
"""
# Don't try to break down the individual pieces of a nested object. That will be too complex for this script, and will also
# be too complex for the end user to try and represent in some flattened way. Thus, require the end user to supply proper JSON
# for a nested object.
try:
json_val = json.loads(val)
if isinstance(json_val, list):
for item in json_val:
if not isinstance(item, dict):
raise ValueError
except ValueError:
print("Error: Invalid JSON in field '{}', row '{}'".format(prop, row_count))
raise
return json_val
def typecast(field_name, value, data_type, line_num):
"""
Converts the value to the specified data type. Used to convert string representations of integers
in the input file to integers, and string representations of booleans to booleans.
Args:
field_name: The name of the field in the input file whose value is being potentially typecast.
Used only in error messages.
value: The value to potentially typecast.
data_type: Specifies the data type of field_name as indicated in the ENCODE profile.
line_num: The current line number in the input file. Used only in error messages.
"""
if data_type == "integer":
return int(value)
elif data_type == "number":
# JSON Schema says that a number can be any numeric type.
# First check if integer, if not, treat as float.
try:
return int(value)
except ValueError:
# This will be raised if trying to convert a string representation of a float to an int.
return float(value)
elif data_type == "boolean":
value = value.lower()
if value not in ["true", "false"]:
raise Exception("Can't convert value '{}' in field '{}' on line {} to data type '{}'.".format(value, field_name, line_num, data_type))
value = json.loads(value)
return value
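# Illustrative conversions (added for clarity; not part of the original module):
#     typecast("replicate", "2", "integer", 1)   -> 2
#     typecast("depth", "30.5", "number", 1)     -> 30.5
#     typecast("paired", "True", "boolean", 1)   -> True   (via json.loads("true"))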
def create_payloads(schema, infile):
"""
First attempts to read the input file as JSON. If that fails, tries the TSV parser.
Args:
schema: `EncodeSchema`. The schema of the objects to be submitted.
"""
try:
with open(infile) as f:
payloads = json.load(f)
return create_payloads_from_json(schema, payloads)
except ValueError:
return create_payloads_from_tsv(schema, infile)
def create_payloads_from_json(schema, payloads):
"""
Generates payloads from a JSON file
Args:
schema: `EncodeSchema`. The schema of the objects to be submitted.
payloads: dict or list parsed from a JSON input file.
Yields: dict. The payload that can be used to either register or patch the
metadata for each row.
"""
if isinstance(payloads, dict):
payloads = [payloads]
for payload in payloads:
payload[euc.Connection.PROFILE_KEY] = schema.name
yield payload
def create_payloads_from_tsv(schema, infile):
"""
Generates the payload for each row in 'infile'.
Args:
schema: EncodeSchema. The schema of the objects to be submitted.
infile - str. Path to input file.
Yields : dict. The payload that can be used to either register or patch the metadata for each row.
"""
STR_REGX = re.compile(r'\'|"')
# Fetch the schema from the ENCODE Portal so we can set attr values to the
# right type when generating the payload (dict).
schema_props = [prop.name for prop in schema.properties]
field_index = {}
fh = open(infile, 'r')
header_fields = fh.readline().strip("\n").split("\t")
skip_field_indices = []
fi_count = -1 # field index count
for field in header_fields:
fi_count += 1
if field.startswith("#"): # non-schema field
skip_field_indices.append(fi_count)
continue
if field not in schema_props:
if field != RECORD_ID_FIELD:
raise Exception(
"Unknown field name '{}', which is not registered as a property in the specified schema at {}.".format(
field, schema.name))
field_index[fi_count] = field
line_count = 1 # already read header line
for line in fh:
line_count += 1
line = line.strip("\n")
if not line.strip() or line[0].startswith("#"):
continue
line = line.split("\t")
payload = {}
payload[euc.Connection.PROFILE_KEY] = schema.name
fi_count = -1
for val in line:
fi_count += 1
if fi_count in skip_field_indices:
continue
val = val.strip()
if not val:
# Then skip. For ex., the biosample schema has a 'date_obtained' property, and if that is
# empty it'll be treated as a formatting error, and the Portal will return a 422.
continue
field = field_index[fi_count]
if field == RECORD_ID_FIELD:
payload[field] = val
continue
field_schema = schema.get_property_from_name(field).schema
schema_val_type = field_schema["type"]
if schema_val_type == "object":
# Must be proper JSON
val = check_valid_json(field, val, line_count)
elif schema_val_type == "array":
item_val_type = field_schema["items"]["type"]
if item_val_type == "object":
# Must be valid JSON
# Check if user supplied optional JSON array literal. If not, I'll add it.
if not val.startswith("["):
val = "[" + val
if not val.endswith("]"):
val += "]"
val = check_valid_json(field, val, line_count)
else:
# User is allowed to enter values in string literals. I'll remove them if I find them,
# since I'm splitting on the ',' to create a list of strings anyway:
val = STR_REGX.sub("", val)
# Remove optional JSON array literal since I'm tokenizing and then converting
# to an array regardless.
if val.startswith("["):
val = val[1:]
if val.endswith("]"):
val = val[:-1]
val = [x.strip() for x in val.split(",")]
# Type cast tokens if need be, i.e. to integers:
val = [typecast(field_name=field, value=x, data_type=item_val_type, line_num=line_count) for x in val if x]
else:
val = typecast(field_name=field, value=val, data_type=schema_val_type, line_num=line_count)
payload[field] = val
yield payload
if __name__ == "__main__":
main()
avg_line_length: 43.651282 | max_line_length: 146 | alphanum_fraction: 0.658717

hexsha: 12b1db7a624f0dc1a6669293185c70ce513a904d | size: 1,915 | ext: py | lang: Python
max_stars: docs/conf.py @ chslink/antispam (head 0cf57251d036c46ae934504a0f7f952128bb4f73, licenses ["MIT"]) | count: 117 | events: 2015-12-10T17:53:21.000Z to 2022-02-13T01:37:31.000Z
max_issues: docs/conf.py @ chslink/antispam (head 0cf57251d036c46ae934504a0f7f952128bb4f73, licenses ["MIT"]) | count: 6 | events: 2016-08-30T14:09:22.000Z to 2021-06-04T22:08:16.000Z
max_forks: docs/conf.py @ chslink/antispam (head 0cf57251d036c46ae934504a0f7f952128bb4f73, licenses ["MIT"]) | count: 28 | events: 2015-12-16T18:35:42.000Z to 2022-02-13T01:37:32.000Z
content:
# Ensure we get the local copy of antispam instead of what's on the standard path
import os
import sys
import time

sys.path.insert(0, os.path.abspath(".."))
import antispam
master_doc = "index"
project = "Antispam"
version = release = antispam.__version__
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_functions = [
    # various modules
    "doctests",
    "main",
    # tornado.escape
    # parse_qs_bytes should probably be documented but it's complicated by
    # having different implementations between py2 and py3.
    "parse_qs_bytes",
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
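# A hypothetical extlinks mapping of that kind (illustrative only, not enabled here):
# extlinks = {
#     'current_tarball': ('https://pypi.org/project/antispam/%s/#files', 'antispam '),
# }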
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
avg_line_length: 29.921875 | max_line_length: 95 | alphanum_fraction: 0.747258

hexsha: b744abebccb01d0ebd476589486800962b928a2e | size: 372 | ext: py | lang: Python
max_stars: m3uragen/image.py @ TiBeN/m3uragen (head 45f6f7c76c7a345d01f43a152f1f815a9891441f, licenses ["Apache-2.0"]) | count: 1 | events: 2020-12-19T09:18:13.000Z to 2020-12-19T09:18:13.000Z
max_issues: m3uragen/image.py @ TiBeN/m3uragen (head 45f6f7c76c7a345d01f43a152f1f815a9891441f, licenses ["Apache-2.0"]) | count: 1 | events: 2021-10-22T00:38:17.000Z to 2021-12-15T13:48:18.000Z
max_forks: m3uragen/image.py @ TiBeN/m3uragen (head 45f6f7c76c7a345d01f43a152f1f815a9891441f, licenses ["Apache-2.0"]) | count: null | events: null
content:
"""Software image class"""
import os
class Image:
def __init__(self, path):
self.path = path
def extract_media_flag(self, media_flag_re):
match = media_flag_re.match(self.path.name)
if match:
return match.group(1)
def move_to(self, out_dir):
self.path = self.path.replace(os.path.join(out_dir, self.path.name))
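# Example usage (illustrative; assumes `path` is a pathlib.Path and uses a hypothetical
# media-flag pattern matching names like "Game (Disk 1).adf"):
#
#     import re
#     from pathlib import Path
#     img = Image(Path("roms/Game (Disk 1).adf"))
#     img.extract_media_flag(re.compile(r'.*\((Disk \d+)\)\.\w+$'))  # -> "Disk 1"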
avg_line_length: 21.882353 | max_line_length: 76 | alphanum_fraction: 0.637097

hexsha: 140b4009aeb24fef058edb22b5629bd045398419 | size: 1,213 | ext: py | lang: Python
max_stars: Train/main.py @ Sanduoo/OpenCV-Tensorflow (head dd7e174fa358287b711845a658caca92ab9a3a61, licenses ["Apache-2.0"]) | count: 5 | events: 2018-12-14T10:21:16.000Z to 2020-10-16T08:20:00.000Z
max_issues: Train/main.py @ Sanduoo/OpenCV-Tensorflow (head dd7e174fa358287b711845a658caca92ab9a3a61, licenses ["Apache-2.0"]) | count: null | events: null
max_forks: Train/main.py @ Sanduoo/OpenCV-Tensorflow (head dd7e174fa358287b711845a658caca92ab9a3a61, licenses ["Apache-2.0"]) | count: null | events: null
content:
import numpy as np
import cv2
import time
from grabscreen import grab_screen
from getkeys import key_check
import os


def keys_to_out(keys):
    # One-hot encode the pressed key: [A, W, D], e.g. 'A' -> [1, 0, 0]
    output = [0, 0, 0]
    if 'A' in keys:
        output[0] = 1
    elif 'W' in keys:
        output[1] = 1
    else:
        output[2] = 1
    return output


file_name = 'training_data.npy'
if os.path.isfile(file_name):
    print('File exists, loading previous data!')
    training_data = list(np.load(file_name))
else:
    print('File does not exist, starting fresh!')
    training_data = []


def main():
    # Countdown before capture starts
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)
    last_time = time.time()
    while True:
        screen = grab_screen(region=(0, 150, 800, 640))
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen = cv2.resize(screen, (80, 60))
        keys = key_check()
        output = keys_to_out(keys)
        training_data.append([screen, output])
        print('Frame took {} seconds'.format(time.time() - last_time))
        last_time = time.time()
        if len(training_data) % 500 == 0:
            print(len(training_data))
            np.save(file_name, training_data)


main()
avg_line_length: 21.280702 | max_line_length: 70 | alphanum_fraction: 0.597692

hexsha: f86c7ab4970b0686748b46e8df7b23d85b685133 | size: 2,057 | ext: py | lang: Python
max_stars: webware/MiddleKit/Tests/MKNone.mkmodel/TestEmpty.py @ PeaceWorksTechnologySolutions/w4py3-middlekit (head a9554e20c47010e7b0c0deee63e1786482c59a1c, licenses ["MIT"]) | count: 2 | events: 2020-10-31T09:12:58.000Z to 2021-02-20T13:52:14.000Z
max_issues: webware/MiddleKit/Tests/MKNone.mkmodel/TestEmpty.py @ WebwareForPython/w4py3-middlekit (head f740e2d2d3a5c225d6b8f9eb27ac08f8deed47e6, licenses ["MIT"]) | count: 2 | events: 2020-01-07T15:24:09.000Z to 2020-01-08T15:39:57.000Z
max_forks: webware/MiddleKit/Tests/MKNone.mkmodel/TestEmpty.py @ PeaceWorksTechnologySolutions/w4py3-middlekit (head a9554e20c47010e7b0c0deee63e1786482c59a1c, licenses ["MIT"]) | count: 1 | events: 2021-09-27T21:04:18.000Z to 2021-09-27T21:04:18.000Z
content:
def test(store):
from Foo import Foo
f = Foo()
# legal sets:
f.setRi(1)
f.setNi(2)
f.setRs('a')
f.setNs('b')
f.setNi(None)
f.setNs(None)
# illegal sets:
errMsg = 'Set None for required attribute, but no exception was raised.'
try:
f.setRi(None)
except Exception:
pass
else:
raise Exception(errMsg)
try:
f.setRs(None)
except Exception:
pass
else:
raise Exception(errMsg)
store.addObject(f)
store.saveChanges()
store.clear()
results = store.fetchObjectsOfClass(Foo)
assert len(results) == 1
f = results[0]
assert f.ri() == 1
assert f.ni() is None
assert f.rs() == 'a'
assert f.ns() is None
return
from webware.MiscUtils.DataTable import DataTable
dataSource = '''
b:int,i:int,l:long,f:float,s:string
0,0,0,0,0
0,0,0,0.0,0.0
1,1,1,1,a
0,-1,8589934592,-3.14,'x'
'''
data = DataTable()
data.readString(dataSource)
for values in data:
print(values)
t = Thing()
t.setB(values['b'])
t.setI(values['i'])
t.setL(values['l'])
t.setF(values['f'])
t.setS(values['s'])
store.addObject(t)
store.saveChanges()
# Try an immediate fetch
results = store.fetchObjectsOfClass(Thing)
assert len(results) == 1
# This tests the uniquing feature of MiddleKit:
assert id(results[0]) == id(t)
# Clear the store's in memory objects and try a fetch again
store.clear()
results = store.fetchObjectsOfClass(Thing)
assert len(results) == 1
assert results[0].allAttrs() == t.allAttrs()
# Make sure what we got from the store is what we put in
assert t.b() == values['b']
assert t.i() == values['i']
assert t.l() == values['l']
assert t.f() == values['f']
assert t.s() == values['s']
# Reset
store.clear()
store.executeSQLTransaction('delete from Thing;')
del t
avg_line_length: 22.11828 | max_line_length: 76 | alphanum_fraction: 0.55615

hexsha: 2db5ec85b013138c52c34c6c1d714a9b7c51ce20 | size: 35,521 | ext: py | lang: Python
max_stars: brian2/tests/test_functions.py @ Debilski/brian2 (head 560377aba16d8ddaba55fd50432b1142f6233f66, licenses ["BSD-2-Clause"]) | count: null | events: null
max_issues: brian2/tests/test_functions.py @ Debilski/brian2 (head 560377aba16d8ddaba55fd50432b1142f6233f66, licenses ["BSD-2-Clause"]) | count: null | events: null
max_forks: brian2/tests/test_functions.py @ Debilski/brian2 (head 560377aba16d8ddaba55fd50432b1142f6233f66, licenses ["BSD-2-Clause"]) | count: null | events: null
content:
from __future__ import print_function
from __future__ import absolute_import
import os
import pytest
from numpy.testing import assert_equal
from brian2 import *
from brian2.core.functions import timestep
from brian2.parsing.sympytools import str_to_sympy, sympy_to_str
from brian2.utils.logger import catch_logs
from brian2.tests.utils import assert_allclose
from brian2.codegen.generators import CodeGenerator
from brian2.codegen.codeobject import CodeObject
@pytest.mark.codegen_independent
def test_constants_sympy():
'''
Make sure that symbolic constants are understood correctly by sympy
'''
assert sympy_to_str(str_to_sympy('1.0/inf')) == '0'
assert sympy_to_str(str_to_sympy('sin(pi)')) == '0'
assert sympy_to_str(str_to_sympy('log(e)')) == '1'
@pytest.mark.standalone_compatible
def test_constants_values():
'''
Make sure that symbolic constants use the correct values in code
'''
G = NeuronGroup(3, 'v : 1')
G.v[0] = 'pi'
G.v[1] = 'e'
G.v[2] = 'inf'
run(0*ms)
assert_allclose(G.v[:], [np.pi, np.e, np.inf])
def test_math_functions():
'''
Test that math functions give the same result, regardless of whether used
directly or in generated Python or C++ code.
'''
default_dt = defaultclock.dt
test_array = np.array([-1, -0.5, 0, 0.5, 1])
def int_(x):
return array(x, dtype=int)
int_.__name__ = 'int'
with catch_logs() as _: # Let's suppress warnings about illegal values
# Functions with a single argument
for func in [cos, tan, sinh, cosh, tanh,
arcsin, arccos, arctan,
log, log10,
exp, np.sqrt,
np.ceil, np.floor, np.sign, int_]:
# Calculate the result directly
numpy_result = func(test_array)
# Calculate the result in a somewhat complicated way by using a
# subexpression in a NeuronGroup
if func.__name__ == 'absolute':
# we want to use the name abs instead of absolute
func_name = 'abs'
else:
func_name = func.__name__
G = NeuronGroup(len(test_array),
'''func = {func}(variable) : 1
variable : 1'''.format(func=func_name))
G.variable = test_array
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(default_dt)
assert_allclose(numpy_result, mon.func_.flatten(),
err_msg='Function %s did not return the correct values' % func.__name__)
# Functions/operators
scalar = 3
for func, operator in [(np.power, '**'), (np.mod, '%')]:
# Calculate the result directly
numpy_result = func(test_array, scalar)
# Calculate the result in a somewhat complicated way by using a
# subexpression in a NeuronGroup
G = NeuronGroup(len(test_array),
'''func = variable {op} scalar : 1
variable : 1'''.format(op=operator))
G.variable = test_array
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(default_dt)
assert_allclose(numpy_result, mon.func_.flatten(),
err_msg='Function %s did not return the correct values' % func.__name__)
@pytest.mark.standalone_compatible
def test_clip():
G = NeuronGroup(4, '''
clipexpr1 = clip(integer_var1, 0, 1) : integer
clipexpr2 = clip(integer_var2, -0.5, 1.5) : integer
clipexpr3 = clip(float_var1, 0, 1) : 1
clipexpr4 = clip(float_var2, -0.5, 1.5) : 1
integer_var1 : integer
integer_var2 : integer
float_var1 : 1
float_var2 : 1
''')
G.integer_var1 = [0, 1, -1, 2]
G.integer_var2 = [0, 1, -1, 2]
G.float_var1 = [0., 1., -1., 2.]
G.float_var2 = [0., 1., -1., 2.]
s_mon = StateMonitor(G, ['clipexpr1', 'clipexpr2',
'clipexpr3', 'clipexpr4'], record=True)
run(defaultclock.dt)
assert_equal(s_mon.clipexpr1.flatten(), [0, 1, 0, 1])
assert_equal(s_mon.clipexpr2.flatten(), [0, 1, 0, 1])
assert_allclose(s_mon.clipexpr3.flatten(), [0, 1, 0, 1])
assert_allclose(s_mon.clipexpr4.flatten(), [0, 1, -0.5, 1.5])
@pytest.mark.standalone_compatible
def test_bool_to_int():
# Test that boolean expressions and variables are correctly converted into
# integers
G = NeuronGroup(2, '''
intexpr1 = int(bool_var) : integer
intexpr2 = int(float_var > 1.0) : integer
bool_var : boolean
float_var : 1
''')
G.bool_var = [True, False]
G.float_var = [2.0, 0.5]
s_mon = StateMonitor(G, ['intexpr1', 'intexpr2'], record=True)
run(defaultclock.dt)
assert_equal(s_mon.intexpr1.flatten(), [1, 0])
assert_equal(s_mon.intexpr2.flatten(), [1, 0])
@pytest.mark.codegen_independent
def test_timestep_function():
dt = defaultclock.dt_
# Check that multiples of dt end up in the correct time step
t = np.arange(100000)*dt
assert_equal(timestep(t, dt), np.arange(100000))
# Scalar values should stay scalar
ts = timestep(0.0005, 0.0001)
assert np.isscalar(ts) and ts == 5
# Length-1 arrays should stay arrays
ts = timestep(np.array([0.0005]), 0.0001)
assert ts.shape == (1,) and ts == 5
@pytest.mark.standalone_compatible
def test_timestep_function_during_run():
group = NeuronGroup(2, '''ref_t : second
ts = timestep(ref_t, dt) + timestep(t, dt) : integer''')
group.ref_t = [-1e4*second, 5*defaultclock.dt]
mon = StateMonitor(group, 'ts', record=True)
run(5*defaultclock.dt)
assert all(mon.ts[0] <= -1e4)
assert_equal(mon.ts[1], [5, 6, 7, 8, 9])
@pytest.mark.standalone_compatible
def test_user_defined_function():
@implementation('cpp',"""
inline double usersin(double x)
{
return sin(x);
}
""")
@implementation('cython', '''
cdef double usersin(double x):
return sin(x)
''')
@check_units(x=1, result=1)
def usersin(x):
return np.sin(x)
default_dt = defaultclock.dt
test_array = np.array([0, 1, 2, 3])
G = NeuronGroup(len(test_array),
'''func = usersin(variable) : 1
variable : 1''')
G.variable = test_array
mon = StateMonitor(G, 'func', record=True)
run(default_dt)
assert_allclose(np.sin(test_array), mon.func_.flatten())
def test_user_defined_function_units():
'''
Test the preparation of functions for use in code with check_units.
'''
if prefs.codegen.target != 'numpy':
pytest.skip('numpy-only test')
def nothing_specified(x, y, z):
return x*(y+z)
no_result_unit = check_units(x=1, y=second, z=second)(nothing_specified)
one_arg_missing = check_units(x=1, z=second, result=second)(nothing_specified)
all_specified = check_units(x=1, y=second, z=second, result=second)(nothing_specified)
G = NeuronGroup(1, '''a : 1
b : second
c : second''',
namespace={'nothing_specified': nothing_specified,
'no_result_unit': no_result_unit,
'one_arg_missing': one_arg_missing,
'all_specified': all_specified})
net = Network(G)
net.run(0*ms) # make sure we have a clock and therefore a t
G.c = 'all_specified(a, b, t)'
with pytest.raises(ValueError):
setattr(G, 'c', 'one_arg_missing(a, b, t)')
with pytest.raises(ValueError):
setattr(G, 'c', 'no_result_unit(a, b, t)')
with pytest.raises(KeyError):
setattr(G, 'c', 'nothing_specified(a, b, t)')
with pytest.raises(DimensionMismatchError):
setattr(G, 'a', 'all_specified(a, b, t)')
with pytest.raises(DimensionMismatchError):
setattr(G, 'a', 'all_specified(b, a, t)')
def test_simple_user_defined_function():
# Make sure that it's possible to use a Python function directly, without
# additional wrapping
@check_units(x=1, result=1)
def usersin(x):
return np.sin(x)
usersin.stateless = True
default_dt = defaultclock.dt
test_array = np.array([0, 1, 2, 3])
G = NeuronGroup(len(test_array),
'''func = usersin(variable) : 1
variable : 1''',
codeobj_class=NumpyCodeObject)
G.variable = test_array
mon = StateMonitor(G, 'func', record=True, codeobj_class=NumpyCodeObject)
net = Network(G, mon)
net.run(default_dt)
assert_allclose(np.sin(test_array), mon.func_.flatten())
# Check that it raises an error for C++
try:
import scipy.weave
G = NeuronGroup(len(test_array),
'''func = usersin(variable) : 1
variable : 1''',
codeobj_class=WeaveCodeObject)
mon = StateMonitor(G, 'func', record=True,
codeobj_class=WeaveCodeObject)
net = Network(G, mon)
with pytest.raises(NotImplementedError):
net.run(0.1*ms)
except ImportError:
pass
def test_manual_user_defined_function():
if prefs.codegen.target != 'numpy':
pytest.skip('numpy-only test')
default_dt = defaultclock.dt
# User defined function without any decorators
def foo(x, y):
return x + y + 3*volt
orig_foo = foo
# Since the function is not annotated with check units, we need to specify
# both the units of the arguments and the return unit
with pytest.raises(ValueError):
Function(foo, return_unit=volt)
with pytest.raises(ValueError):
Function(foo, arg_units=[volt, volt])
foo = Function(foo, arg_units=[volt, volt], return_unit=volt)
assert foo(1*volt, 2*volt) == 6*volt
# Incorrect argument units
group = NeuronGroup(1, '''
dv/dt = foo(x, y)/ms : volt
x : 1
y : 1''')
net = Network(group)
with pytest.raises(DimensionMismatchError):
net.run(0*ms, namespace={ 'foo': foo})
# Incorrect output unit
group = NeuronGroup(1, '''
dv/dt = foo(x, y)/ms : 1
x : volt
y : volt''')
net = Network(group)
with pytest.raises(DimensionMismatchError):
net.run(0*ms, namespace={'foo': foo})
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(default_dt)
assert mon[0].func == [6] * volt
# discard units
foo.implementations.add_numpy_implementation(orig_foo,
discard_units=True)
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(default_dt)
assert mon[0].func == [6] * volt
def test_manual_user_defined_function_weave():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
# User defined function without any decorators
def foo(x, y):
return x + y + 3*volt
foo = Function(foo, arg_units=[volt, volt], return_unit=volt)
code = {'support_code': '''
inline double foo(const double x, const double y)
{
return x + y + 3;
}
'''}
foo.implementations.add_implementation('cpp', code)
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_manual_user_defined_function_cpp_standalone_compiler_args():
set_device('cpp_standalone', directory=None)
@implementation('cpp', '''
static inline double foo(const double x, const double y)
{
return x + y + _THREE;
}''', # just check whether we can specify the supported compiler args,
# only the define macro is actually used
headers=[], sources=[], libraries=[], include_dirs=[],
library_dirs=[], runtime_library_dirs=[],
define_macros=[('_THREE', '3')])
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_manual_user_defined_function_cpp_standalone_wrong_compiler_args1():
set_device('cpp_standalone', directory=None)
@implementation('cpp', '''
static inline double foo(const double x, const double y)
{
return x + y + _THREE;
}''', some_arg=[]) # non-existing argument
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
with pytest.raises(ValueError):
net.run(defaultclock.dt, namespace={'foo': foo})
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_manual_user_defined_function_cpp_standalone_wrong_compiler_args2():
set_device('cpp_standalone', directory=None)
@implementation('cpp', '''
static inline double foo(const double x, const double y)
{
return x + y + _THREE;
}''', headers='<stdio.h>') # existing argument, wrong value type
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
with pytest.raises(TypeError):
net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_weave_compiler_args():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
static inline double foo(const double x, const double y)
{
return x + y + _THREE;
}''', # just check whether we can specify the supported compiler args,
# only the define macro is actually used
headers=[], sources=[], libraries=[], include_dirs=[],
library_dirs=[], runtime_library_dirs=[],
define_macros=[('_THREE', '3')])
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
def test_manual_user_defined_function_weave_wrong_compiler_args1():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
static inline double foo(const double x, const double y)
{
return x + y + _THREE;
}''', some_arg=[]) # non-existing argument
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
with pytest.raises(ValueError):
net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_weave_wrong_compiler_args2():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
static inline double foo(const double x, const double y)
{
return x + y + _THREE;
}''', headers='<stdio.h>') # existing argument, wrong value type
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
with pytest.raises(TypeError):
net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_cython_compiler_args():
if prefs.codegen.target != 'cython':
pytest.skip('Cython-only test')
@implementation('cython', '''
cdef double foo(double x, const double y):
return x + y + 3
''', # just check whether we can specify the supported compiler args,
libraries=[], include_dirs=[], library_dirs=[], runtime_library_dirs=[])
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
def test_manual_user_defined_function_cython_wrong_compiler_args1():
if prefs.codegen.target != 'cython':
pytest.skip('Cython-only test')
@implementation('cython', '''
cdef double foo(double x, const double y):
return x + y + 3
''', some_arg=[]) # non-existing argument
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
with pytest.raises(ValueError):
net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_cython_wrong_compiler_args2():
if prefs.codegen.target != 'cython':
pytest.skip('Cython-only test')
@implementation('cython', '''
cdef double foo(double x, const double y):
return x + y + 3
''', libraries='cstdio') # existing argument, wrong value type
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
with pytest.raises(TypeError):
net.run(defaultclock.dt, namespace={'foo': foo})
def test_external_function_cython():
if prefs.codegen.target != 'cython':
pytest.skip('Cython-only test')
this_dir = os.path.abspath(os.path.dirname(__file__))
@implementation('cython', 'from func_def_cython cimport foo',
sources=[os.path.join(this_dir, 'func_def_cython.pyx')])
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
def test_external_function_weave():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
this_dir = os.path.abspath(os.path.dirname(__file__))
@implementation('cpp', '//all code in func_def_cpp.cpp',
headers=['"func_def_cpp.h"'],
include_dirs=[this_dir],
sources=[os.path.join(this_dir, 'func_def_cpp.cpp')])
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_external_function_cpp_standalone():
set_device('cpp_standalone', directory=None)
this_dir = os.path.abspath(os.path.dirname(__file__))
@implementation('cpp', '//all code in func_def_cpp.cpp',
headers=['"func_def_cpp.h"'],
include_dirs=[this_dir],
sources=[os.path.join(this_dir, 'func_def_cpp.cpp')])
@check_units(x=volt, y=volt, result=volt)
def foo(x, y):
return x + y + 3*volt
G = NeuronGroup(1, '''
func = foo(x, y) : volt
x : volt
y : volt''')
G.x = 1*volt
G.y = 2*volt
mon = StateMonitor(G, 'func', record=True)
net = Network(G, mon)
net.run(defaultclock.dt)
assert mon[0].func == [6] * volt
@pytest.mark.codegen_independent
def test_user_defined_function_discarding_units():
# A function with units that should discard units also inside the function
@implementation('numpy', discard_units=True)
@check_units(v=volt, result=volt)
def foo(v):
return v + 3*volt # this normally raises an error for unitless v
assert foo(5*volt) == 8*volt
# Test the function that is used during a run
assert foo.implementations[NumpyCodeObject].get_code(None)(5) == 8
@pytest.mark.codegen_independent
def test_user_defined_function_discarding_units_2():
# Add a numpy implementation explicitly (as in TimedArray)
unit = volt
@check_units(v=volt, result=unit)
def foo(v):
return v + 3*unit # this normally raises an error for unitless v
foo = Function(pyfunc=foo)
def unitless_foo(v):
return v + 3
foo.implementations.add_implementation('numpy', code=unitless_foo)
assert foo(5*volt) == 8*volt
# Test the function that is used during a run
assert foo.implementations[NumpyCodeObject].get_code(None)(5) == 8
@pytest.mark.codegen_independent
def test_function_implementation_container():
import brian2.codegen.targets as targets
class ACodeGenerator(CodeGenerator):
class_name = 'A Language'
class BCodeGenerator(CodeGenerator):
class_name = 'B Language'
class ACodeObject(CodeObject):
generator_class = ACodeGenerator
class_name = 'A'
class A2CodeObject(CodeObject):
generator_class = ACodeGenerator
class_name = 'A2'
class BCodeObject(CodeObject):
generator_class = BCodeGenerator
class_name = 'B'
# Register the code generation targets
_previous_codegen_targets = set(targets.codegen_targets)
targets.codegen_targets = {ACodeObject, BCodeObject}
@check_units(x=volt, result=volt)
def foo(x):
return x
f = Function(foo)
container = f.implementations
# inserting into the container with a CodeGenerator class
container.add_implementation(BCodeGenerator, code='implementation B language')
assert container[BCodeGenerator].get_code(None) == 'implementation B language'
# inserting into the container with a CodeObject class
container.add_implementation(ACodeObject, code='implementation A CodeObject')
assert container[ACodeObject].get_code(None) == 'implementation A CodeObject'
# inserting into the container with a name of a CodeGenerator
container.add_implementation('A Language', 'implementation A Language')
assert container['A Language'].get_code(None) == 'implementation A Language'
assert container[ACodeGenerator].get_code(None) == 'implementation A Language'
assert container[A2CodeObject].get_code(None) == 'implementation A Language'
# inserting into the container with a name of a CodeObject
container.add_implementation('B', 'implementation B CodeObject')
assert container['B'].get_code(None) == 'implementation B CodeObject'
assert container[BCodeObject].get_code(None) == 'implementation B CodeObject'
with pytest.raises(KeyError):
container['unknown']
# some basic dictionary properties
assert len(container) == 4
assert set((key for key in container)) == {'A Language', 'B', ACodeObject,
BCodeGenerator}
# Restore the previous codegeneration targets
targets.codegen_targets = _previous_codegen_targets
def test_function_dependencies_weave():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
float foo(float x)
{
return 42*0.001;
}''')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cpp', '''
float bar(float x)
{
return 2*foo(x);
}''', dependencies={'foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_weave_rename():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
float _foo(float x)
{
return 42*0.001;
}''', name='_foo')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cpp', '''
float bar(float x)
{
return 2*my_foo(x);
}''', dependencies={'my_foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_cython():
if prefs.codegen.target != 'cython':
pytest.skip('cython-only test')
@implementation('cython', '''
cdef float foo(float x):
return 42*0.001
''')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cython', '''
cdef float bar(float x):
return 2*foo(x)
''', dependencies={'foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_cython_rename():
if prefs.codegen.target != 'cython':
pytest.skip('cython-only test')
@implementation('cython', '''
cdef float _foo(float x):
return 42*0.001
''', name='_foo')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cython', '''
cdef float bar(float x):
return 2*my_foo(x)
''', dependencies={'my_foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_numpy():
if prefs.codegen.target != 'numpy':
pytest.skip('numpy-only test')
@implementation('cpp', '''
float foo(float x)
{
return 42*0.001;
}''')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for C++ and an
# implementation for numpy that makes use of the previous function.
# Note that we don't need to use the explicit dependencies mechanism for
# numpy, since the Python function stores a reference to the referenced
# function directly
@implementation('cpp', '''
float bar(float x)
{
return 84*0.001;
}''')
@check_units(x=volt, result=volt)
def bar(x):
return 2*foo(x)
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
@pytest.mark.standalone_compatible
def test_repeated_function_dependencies():
# each of the binomial functions adds randn as a dependency, see #988
test_neuron = NeuronGroup(1, 'x : 1',
namespace={'bino_1': BinomialFunction(10, 0.5),
'bino_2': BinomialFunction(10, 0.6)})
test_neuron.x = 'bino_1()+bino_2()'
run(0 * ms)
@pytest.mark.standalone_compatible
def test_binomial():
binomial_f_approximated = BinomialFunction(100, 0.1, approximate=True)
binomial_f = BinomialFunction(100, 0.1, approximate=False)
# Just check that it does not raise an error and that it produces some
# values
G = NeuronGroup(1, '''x : 1
y : 1''')
G.run_regularly('''x = binomial_f_approximated()
y = binomial_f()''')
mon = StateMonitor(G, ['x', 'y'], record=0)
run(1*ms)
assert np.var(mon[0].x) > 0
assert np.var(mon[0].y) > 0
@pytest.mark.standalone_compatible
def test_poisson():
# Just check that it does not raise an error and that it produces some
# values
G = NeuronGroup(5, '''l : 1
x : integer
y : integer
z : integer
''')
G.l = [0, 1, 5, 15, 25]
G.run_regularly('''x = poisson(l)
y = poisson(5)
z = poisson(0)''')
mon = StateMonitor(G, ['x', 'y', 'z'], record=True)
run(100*defaultclock.dt)
assert_equal(mon.x[0], 0)
assert all(np.var(mon.x[1:], axis=1) > 0)
assert all(np.var(mon.y, axis=1) > 0)
assert_equal(mon.z, 0)
def test_declare_types():
if prefs.codegen.target != 'numpy':
pytest.skip('numpy-only test')
@declare_types(a='integer', b='float', result='highest')
def f(a, b):
return a*b
assert f._arg_types==['integer', 'float']
assert f._return_type == 'highest'
@declare_types(b='float')
def f(a, b, c):
return a*b*c
assert f._arg_types==['any', 'float', 'any']
assert f._return_type == 'float'
def bad_argtype():
@declare_types(b='floating')
def f(a, b, c):
return a*b*c
with pytest.raises(ValueError):
bad_argtype()
def bad_argname():
@declare_types(d='floating')
def f(a, b, c):
return a*b*c
with pytest.raises(ValueError):
bad_argname()
@check_units(a=volt, b=1)
@declare_types(a='float', b='integer')
def f(a, b):
return a*b
@declare_types(a='float', b='integer')
@check_units(a=volt, b=1)
def f(a, b):
return a*b
def bad_units():
@declare_types(a='integer', b='float')
@check_units(a=volt, b=1, result=volt)
def f(a, b):
return a*b
eqs = '''
dv/dt = f(v, 1)/second : 1
'''
G = NeuronGroup(1, eqs)
Network(G).run(1*ms)
with pytest.raises(TypeError):
bad_units()
def bad_type():
@implementation('numpy', discard_units=True)
@declare_types(a='float', result='float')
@check_units(a=1, result=1)
def f(a):
return a
eqs = '''
a : integer
dv/dt = f(a)*v/second : 1
'''
G = NeuronGroup(1, eqs)
Network(G).run(1*ms)
with pytest.raises(TypeError):
bad_type()
def test_multiple_stateless_function_calls():
# Check that expressions such as rand() + rand() (which might be incorrectly
# simplified to 2*rand()) raise an error
G = NeuronGroup(1, 'dv/dt = (rand() - rand())/second : 1')
net = Network(G)
with pytest.raises(NotImplementedError):
net.run(0*ms)
G2 = NeuronGroup(1, 'v:1', threshold='v>1', reset='v=rand() - rand()')
net2 = Network(G2)
with pytest.raises(NotImplementedError):
net2.run(0*ms)
G3 = NeuronGroup(1, 'v:1')
G3.run_regularly('v = rand() - rand()')
net3 = Network(G3)
with pytest.raises(NotImplementedError):
net3.run(0*ms)
if __name__ == '__main__':
from brian2 import prefs
# prefs.codegen.target = 'numpy'
import time
from _pytest.outcomes import Skipped
for f in [
test_constants_sympy,
test_constants_values,
test_math_functions,
test_clip,
test_bool_to_int,
test_timestep_function,
test_timestep_function_during_run,
test_user_defined_function,
test_user_defined_function_units,
test_simple_user_defined_function,
test_manual_user_defined_function,
test_manual_user_defined_function_weave,
test_external_function_cython,
test_external_function_weave,
test_user_defined_function_discarding_units,
test_user_defined_function_discarding_units_2,
test_function_implementation_container,
test_function_dependencies_numpy,
test_function_dependencies_weave,
test_function_dependencies_weave_rename,
test_function_dependencies_cython,
test_function_dependencies_cython_rename,
test_repeated_function_dependencies,
test_binomial,
test_poisson,
test_declare_types,
test_multiple_stateless_function_calls,
]:
try:
start = time.time()
f()
print('Test', f.__name__, 'took', time.time()-start)
except Skipped as e:
print('Skipping test', f.__name__, e)
avg_line_length: 32.350638 | max_line_length: 100 | alphanum_fraction: 0.585485

hexsha: 15310979e7217f14b08fbc4c314d16f7d9279546 | size: 12,863 | ext: py | lang: Python
max_stars: train_semseg.py @ dsw-jlu-rgzn/Pointnet_Pointnet2_pytorch (head 6f61a47729c93964580aa6c89b0f2e16dd3f4324, licenses ["MIT"]) | count: 1 | events: 2022-03-11T08:34:38.000Z to 2022-03-11T08:34:38.000Z
max_issues: train_semseg.py @ dsw-jlu-rgzn/Pointnet_Pointnet2_pytorch (head 6f61a47729c93964580aa6c89b0f2e16dd3f4324, licenses ["MIT"]) | count: null | events: null
max_forks: train_semseg.py @ dsw-jlu-rgzn/Pointnet_Pointnet2_pytorch (head 6f61a47729c93964580aa6c89b0f2e16dd3f4324, licenses ["MIT"]) | count: null | events: null
content:
"""
Author: Benny
Date: Nov 2019
"""
import argparse
import os
from data_utils.S3DISDataLoader import S3DISDataset
import torch
import datetime
import logging
from pathlib import Path
import sys
import importlib
import shutil
from tqdm import tqdm
import provider
import numpy as np
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
classes = ['ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'table', 'chair', 'sofa', 'bookcase',
'board', 'clutter']
class2label = {cls: i for i, cls in enumerate(classes)}
seg_classes = class2label
seg_label_to_cat = {}
for i, cat in enumerate(seg_classes.keys()):
seg_label_to_cat[i] = cat
def inplace_relu(m):
classname = m.__class__.__name__
if classname.find('ReLU') != -1:
m.inplace=True
def parse_args():
parser = argparse.ArgumentParser('Model')
parser.add_argument('--model', type=str, default='pointnet_sem_seg', help='model name [default: pointnet_sem_seg]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--epoch', default=32, type=int, help='Epoch to run [default: 32]')
parser.add_argument('--learning_rate', default=0.001, type=float, help='Initial learning rate [default: 0.001]')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--optimizer', type=str, default='Adam', help='Adam or SGD [default: Adam]')
parser.add_argument('--log_dir', type=str, default=None, help='Log path [default: None]')
parser.add_argument('--decay_rate', type=float, default=1e-4, help='weight decay [default: 1e-4]')
parser.add_argument('--npoint', type=int, default=4096, help='Point Number [default: 4096]')
parser.add_argument('--step_size', type=int, default=10, help='Decay step for lr decay [default: every 10 epochs]')
parser.add_argument('--lr_decay', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--test_area', type=int, default=5, help='Which area to use for test, option: 1-6 [default: 5]')
return parser.parse_args()
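# Example invocation (illustrative; adjust the model, test area and log dir to your setup):
#     python train_semseg.py --model pointnet2_sem_seg --test_area 5 --log_dir sem_seg_run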
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
experiment_dir = Path('./log/')
experiment_dir.mkdir(exist_ok=True)
experiment_dir = experiment_dir.joinpath('sem_seg')
experiment_dir.mkdir(exist_ok=True)
if args.log_dir is None:
experiment_dir = experiment_dir.joinpath(timestr)
else:
experiment_dir = experiment_dir.joinpath(args.log_dir)
experiment_dir.mkdir(exist_ok=True)
checkpoints_dir = experiment_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = experiment_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
root = 'data/stanford_indoor3d/'
NUM_CLASSES = 13
NUM_POINT = args.npoint
BATCH_SIZE = args.batch_size
print("start loading training data ...")
TRAIN_DATASET = S3DISDataset(split='train', data_root=root, num_point=NUM_POINT, test_area=args.test_area, block_size=1.0, sample_rate=1.0, transform=None)
print("start loading test data ...")
TEST_DATASET = S3DISDataset(split='test', data_root=root, num_point=NUM_POINT, test_area=args.test_area, block_size=1.0, sample_rate=1.0, transform=None)
trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=BATCH_SIZE, shuffle=True, num_workers=10,
pin_memory=True, drop_last=True,
worker_init_fn=lambda x: np.random.seed(x + int(time.time())))
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=BATCH_SIZE, shuffle=False, num_workers=10,
pin_memory=True, drop_last=True)
weights = torch.Tensor(TRAIN_DATASET.labelweights).cuda()
log_string("The number of training data is: %d" % len(TRAIN_DATASET))
log_string("The number of test data is: %d" % len(TEST_DATASET))
'''MODEL LOADING'''
MODEL = importlib.import_module(args.model)
shutil.copy('models/%s.py' % args.model, str(experiment_dir))
shutil.copy('models/pointnet2_utils.py', str(experiment_dir))
classifier = MODEL.get_model(NUM_CLASSES).cuda()
criterion = MODEL.get_loss().cuda()
classifier.apply(inplace_relu)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
torch.nn.init.xavier_normal_(m.weight.data)
torch.nn.init.constant_(m.bias.data, 0.0)
elif classname.find('Linear') != -1:
torch.nn.init.xavier_normal_(m.weight.data)
torch.nn.init.constant_(m.bias.data, 0.0)
try:
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
    except Exception:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
classifier = classifier.apply(weights_init)
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=args.learning_rate, momentum=0.9)
def bn_momentum_adjust(m, momentum):
if isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm1d):
m.momentum = momentum
LEARNING_RATE_CLIP = 1e-5
MOMENTUM_ORIGINAL = 0.1
MOMENTUM_DECCAY = 0.5
MOMENTUM_DECCAY_STEP = args.step_size
global_epoch = 0
best_iou = 0
for epoch in range(start_epoch, args.epoch):
'''Train on chopped scenes'''
log_string('**** Epoch %d (%d/%s) ****' % (global_epoch + 1, epoch + 1, args.epoch))
lr = max(args.learning_rate * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)
log_string('Learning rate:%f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECCAY ** (epoch // MOMENTUM_DECCAY_STEP))
if momentum < 0.01:
momentum = 0.01
print('BN momentum updated to: %f' % momentum)
classifier = classifier.apply(lambda x: bn_momentum_adjust(x, momentum))
num_batches = len(trainDataLoader)
total_correct = 0
total_seen = 0
loss_sum = 0
classifier = classifier.train()
for i, (points, target) in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
optimizer.zero_grad()
points = points.data.numpy()
points[:, :, :3] = provider.rotate_point_cloud_z(points[:, :, :3])
points = torch.Tensor(points)
points, target = points.float().cuda(), target.long().cuda()
points = points.transpose(2, 1)
seg_pred, trans_feat = classifier(points)
seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
batch_label = target.view(-1, 1)[:, 0].cpu().data.numpy()
target = target.view(-1, 1)[:, 0]
loss = criterion(seg_pred, target, trans_feat, weights)
loss.backward()
optimizer.step()
pred_choice = seg_pred.cpu().data.max(1)[1].numpy()
correct = np.sum(pred_choice == batch_label)
total_correct += correct
total_seen += (BATCH_SIZE * NUM_POINT)
loss_sum += loss
log_string('Training mean loss: %f' % (loss_sum / num_batches))
log_string('Training accuracy: %f' % (total_correct / float(total_seen)))
if epoch % 5 == 0:
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/model.pth'
log_string('Saving at %s' % savepath)
state = {
'epoch': epoch,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
log_string('Saving model....')
'''Evaluate on chopped scenes'''
with torch.no_grad():
num_batches = len(testDataLoader)
total_correct = 0
total_seen = 0
loss_sum = 0
labelweights = np.zeros(NUM_CLASSES)
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]
classifier = classifier.eval()
log_string('---- EPOCH %03d EVALUATION ----' % (global_epoch + 1))
for i, (points, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
points = points.data.numpy()
points = torch.Tensor(points)
points, target = points.float().cuda(), target.long().cuda()
points = points.transpose(2, 1)
seg_pred, trans_feat = classifier(points)
pred_val = seg_pred.contiguous().cpu().data.numpy()
seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
batch_label = target.cpu().data.numpy()
target = target.view(-1, 1)[:, 0]
loss = criterion(seg_pred, target, trans_feat, weights)
loss_sum += loss
pred_val = np.argmax(pred_val, 2)
correct = np.sum((pred_val == batch_label))
total_correct += correct
total_seen += (BATCH_SIZE * NUM_POINT)
tmp, _ = np.histogram(batch_label, range(NUM_CLASSES + 1))
labelweights += tmp
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum((batch_label == l))
total_correct_class[l] += np.sum((pred_val == l) & (batch_label == l))
total_iou_deno_class[l] += np.sum(((pred_val == l) | (batch_label == l)))
labelweights = labelweights.astype(np.float32) / np.sum(labelweights.astype(np.float32))
            mIoU = np.mean(np.array(total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float64) + 1e-6))
log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
log_string('eval point avg class IoU: %f' % (mIoU))
log_string('eval point accuracy: %f' % (total_correct / float(total_seen)))
log_string('eval point avg class acc: %f' % (
                np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float64) + 1e-6))))
iou_per_class_str = '------- IoU --------\n'
for l in range(NUM_CLASSES):
iou_per_class_str += 'class %s weight: %.3f, IoU: %.3f \n' % (
                    seg_label_to_cat[l] + ' ' * (14 - len(seg_label_to_cat[l])), labelweights[l],
total_correct_class[l] / float(total_iou_deno_class[l]))
log_string(iou_per_class_str)
log_string('Eval mean loss: %f' % (loss_sum / num_batches))
log_string('Eval accuracy: %f' % (total_correct / float(total_seen)))
if mIoU >= best_iou:
best_iou = mIoU
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'
log_string('Saving at %s' % savepath)
state = {
'epoch': epoch,
'class_avg_iou': mIoU,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
log_string('Saving model....')
log_string('Best mIoU: %f' % best_iou)
global_epoch += 1
if __name__ == '__main__':
args = parse_args()
main(args)
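# Hedged, standalone illustration of the decay schedules used in main() with the
# default hyper-parameters (learning_rate=0.001, lr_decay=0.7, step_size=10,
# MOMENTUM_ORIGINAL=0.1, MOMENTUM_DECCAY=0.5):
#   epochs  0-9 : lr = 1.0e-3,   BN momentum = 0.1
#   epochs 10-19: lr = 7.0e-4,   BN momentum = 0.05
#   epochs 20-29: lr = 4.9e-4,   BN momentum = 0.025
# lr is clipped from below at LEARNING_RATE_CLIP (1e-5) and BN momentum at 0.01.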
| 43.60339
| 159
| 0.613931
|
5d14d1dd31f44e62482cdf0ac16fc5eb1cafa7c3
| 978
|
py
|
Python
|
fun/linked.py
|
iganichev/scratch
|
e570f77abb855a21ec4ef893b5cfc97d331d0ce5
|
[
"MIT"
] | null | null | null |
fun/linked.py
|
iganichev/scratch
|
e570f77abb855a21ec4ef893b5cfc97d331d0ce5
|
[
"MIT"
] | null | null | null |
fun/linked.py
|
iganichev/scratch
|
e570f77abb855a21ec4ef893b5cfc97d331d0ce5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
carry = 0
c1 = l1
c2 = l2
r = None
head = None
while True:
if c1 is None and c2 is None and carry == 0:
return head
elif c1 is None and c2 is None:
r.next = ListNode(1)
return head
v1 = c1.val if c1 else 0
c1 = c1.next if c1 else None
v2 = c2.val if c2 else 0
c2 = c2.next if c2 else None
n = ListNode((v1 + v2 + carry) % 10)
            carry = (carry + v1 + v2) // 10
head = head or n
if r is not None:
r.next = n
r = n
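# Hedged usage sketch; the helper and demo below are not part of the original file.
# Digits are stored in reverse order, so 342 + 465 = 807 is 2->4->3 plus 5->6->4,
# and the expected result is 7->0->8.
def _build_list(digits):
    head = ListNode(digits[0])
    node = head
    for d in digits[1:]:
        node.next = ListNode(d)
        node = node.next
    return head

if __name__ == '__main__':
    node = Solution().addTwoNumbers(_build_list([2, 4, 3]), _build_list([5, 6, 4]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [7, 0, 8]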
| 24.45
| 56
| 0.45501
|
5bfac40ea42037ccd1118dab9e2bd2b8cdba861f
| 5,513
|
py
|
Python
|
src/dynamic_graph_head/vicon.py
|
JerryWuZiJie/dynamic_graph_head
|
bd2f035a62cd282114f45e9635bfd5664670ab0d
|
[
"BSD-3-Clause"
] | null | null | null |
src/dynamic_graph_head/vicon.py
|
JerryWuZiJie/dynamic_graph_head
|
bd2f035a62cd282114f45e9635bfd5664670ab0d
|
[
"BSD-3-Clause"
] | null | null | null |
src/dynamic_graph_head/vicon.py
|
JerryWuZiJie/dynamic_graph_head
|
bd2f035a62cd282114f45e9635bfd5664670ab0d
|
[
"BSD-3-Clause"
] | null | null | null |
"""__init__
License: BSD 3-Clause License
Copyright (C) 2021, New York University
Copyright note valid unless otherwise stated in individual files.
All rights reserved.
"""
import numpy as np
import pinocchio as pin
try:
from vicon_sdk_cpp import ViconClient, ViconFrame
has_real_vicon = True
except ImportError:
has_real_vicon = False
class SimVicon:
def __init__(self, objects, delay_dt=0, noise_data_std={}):
"""
Args:
            noise_data_std: Std of the noise to apply to the measurements.
"""
self.objects = objects
self.bias_xyz = np.zeros((len(objects), 3))
self.vicon_frames = {}
self.update_delay(delay_dt)
self.update_noise_data(noise_data_std)
self.use_delay = True
self.use_noise_model = True
def update_delay(self, delay_dt):
self.delay_dt = delay_dt
length = delay_dt + 1
self.fill_history = True
        # For each object, set up a history for position and body velocity.
self.history = {}
for i, obj in enumerate(self.objects):
self.history[obj] = {
'position': np.zeros((length, 7)),
'body_velocity': np.zeros((length, 6))
}
def update_noise_data(self, noise_data_std={}):
self.noise_data_std = noise_data_std
if not 'position_xyzrpy' in noise_data_std:
self.noise_data_std['position_xyzrpy'] = np.zeros(6)
if not 'body_velocity' in noise_data_std:
self.noise_data_std['body_velocity'] = np.zeros(6)
def apply_noise_model(self, pos, vel):
def sample_noise(entry):
noise_var = self.noise_data_std[entry]**2
return np.random.multivariate_normal(np.zeros_like(noise_var), np.diag(noise_var))
noise_pos = sample_noise('position_xyzrpy')
se3 = pin.XYZQUATToSE3(pos)
se3.translation += noise_pos[:3]
se3.rotation = se3.rotation @ pin.rpy.rpyToMatrix(*noise_pos[3:])
pos = pin.SE3ToXYZQUAT(se3)
vel += sample_noise('body_velocity')
return pos, vel
def update(self, thread_head):
# Write the position and velocity to the history buffer for each
# tracked object.
history = self.history
self.write_idx = thread_head.ti % (self.delay_dt + 1)
self.read_idx = (thread_head.ti + 1) % (self.delay_dt + 1)
for i, obj in enumerate(self.objects):
robot, frame = obj.split('/')
assert robot == frame, "Not supporting other frames right now."
# Seek the head with the vicon object.
for name, head in thread_head.heads.items():
if head._vicon_name == robot:
pos, vel = self.apply_noise_model(
head._sensor__vicon_base_position.copy(),
head._sensor__vicon_base_velocity.copy())
                    # At the first timestep, fill the full history.
if self.fill_history:
self.fill_history = False
history[obj]['position'][:] = pos
history[obj]['body_velocity'][:] = vel
else:
history[obj]['position'][self.write_idx] = pos
history[obj]['body_velocity'][self.write_idx] = vel
self.vicon_frames[obj] = {
'idx': i,
'head': head,
}
def get_state(self, vicon_object):
pos = self.history[vicon_object]['position'][self.read_idx]
vel = self.history[vicon_object]['body_velocity'][self.read_idx]
pos[:3] -= self.bias_xyz[self.vicon_frames[vicon_object]['idx']]
return (pos, vel)
def reset_bias(self, vicon_object):
self.bias_xyz[self.vicon_frames[vicon_object]['idx']] = 0
def bias_position(self, vicon_object):
pos = self.history[vicon_object]['position'][self.read_idx]
self.bias_xyz[self.vicon_frames[vicon_object]['idx'], :2] = pos[:2].copy()
if has_real_vicon:
class Vicon:
"""Abstraction around the ViconSDK to read position and velocity"""
def __init__(self, address, objects):
self.client = ViconClient()
self.client.initialize(address)
self.client.run()
self.objects = objects
self.object_data = {}
for i, object in enumerate(objects):
self.object_data[object] = {
'frame': ViconFrame(),
'bias': np.zeros(3)
}
print('Vicon: Tracking object', object)
def update(self, thread_head):
for object, data in self.object_data.items():
self.client.get_vicon_frame(object, data['frame'])
def get_state(self, vicon_object):
data = self.object_data[vicon_object]
pos = data['frame'].se3_pose.copy()
pos[:3] -= data['bias']
return pos, data['frame'].velocity_body_frame.copy()
def bias_position(self, vicon_object):
data = self.object_data[vicon_object]
data['bias'][:2] = data['frame'].se3_pose[:2].copy()
def set_bias_z(self, vicon_object, bias_z):
self.object_data[vicon_object]['bias'][2] = bias_z
else:
class Vicon:
def __init__(self, address, objects):
raise Exception('vicon_sdk_cpp not found. Is it installed?')
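# Minimal construction sketch (hedged: the object name and noise values below are
# illustrative only, and running this still requires pinocchio since it is imported
# at the top of the module). Missing noise keys are filled with zeros and a
# (delay_dt + 1)-step history is pre-allocated per tracked object.
if __name__ == '__main__':
    sim = SimVicon(objects=['solo12/solo12'], delay_dt=2,
                   noise_data_std={'body_velocity': np.full(6, 0.01)})
    print(sim.noise_data_std['position_xyzrpy'])            # zeros(6), filled in by default
    print(sim.history['solo12/solo12']['position'].shape)   # (3, 7)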
| 34.45625
| 94
| 0.58353
|
c66910832fff5402c3014f5236740f06aa69feef
| 14,615
|
py
|
Python
|
neutron/db/db_base_plugin_common.py
|
p0i0/openstack-neutron
|
df2ee28ae9a43cc511482bd6ece5396eb1288814
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/db_base_plugin_common.py
|
p0i0/openstack-neutron
|
df2ee28ae9a43cc511482bd6ece5396eb1288814
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/db_base_plugin_common.py
|
p0i0/openstack-neutron
|
df2ee28ae9a43cc511482bd6ece5396eb1288814
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import utils
from neutron.db import common_db_mixin
from neutron.db import models_v2
from neutron.objects import subnet as subnet_obj
from neutron.objects import subnetpool as subnetpool_obj
LOG = logging.getLogger(__name__)
def convert_result_to_dict(f):
@functools.wraps(f)
def inner(*args, **kwargs):
result = f(*args, **kwargs)
if result is None:
return None
elif isinstance(result, list):
return [r.to_dict() for r in result]
else:
return result.to_dict()
return inner
def filter_fields(f):
@functools.wraps(f)
def inner_filter(*args, **kwargs):
result = f(*args, **kwargs)
fields = kwargs.get('fields')
if not fields:
try:
pos = f.__code__.co_varnames.index('fields')
fields = args[pos]
except (IndexError, ValueError):
return result
do_filter = lambda d: {k: v for k, v in d.items() if k in fields}
if isinstance(result, list):
return [do_filter(obj) for obj in result]
else:
return do_filter(result)
return inner_filter
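# Illustrative sketch (hypothetical plugin method, not part of Neutron): when a
# 'fields' argument is supplied, filter_fields strips every other key from the
# returned dict(s).
#
#     @filter_fields
#     def _list_things(self, context, filters=None, fields=None):
#         return [{'id': 'a', 'name': 'x', 'status': 'ACTIVE'}]
#
#     plugin._list_things(ctx, fields=['id'])   # -> [{'id': 'a'}]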
class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
"""Stores getters and helper methods for db_base_plugin_v2
All private getters and simple helpers like _make_*_dict were moved from
db_base_plugin_v2.
More complicated logic and public methods left in db_base_plugin_v2.
Main purpose of this class is to make getters accessible for Ipam
backends.
"""
@staticmethod
def _generate_mac():
return utils.get_random_mac(cfg.CONF.base_mac.split(':'))
def _is_mac_in_use(self, context, network_id, mac_address):
return bool(context.session.query(models_v2.Port).
filter(models_v2.Port.network_id == network_id).
filter(models_v2.Port.mac_address == mac_address).
count())
@staticmethod
def _delete_ip_allocation(context, network_id, subnet_id, ip_address):
# Delete the IP address from the IPAllocate table
LOG.debug("Delete allocated IP %(ip_address)s "
"(%(network_id)s/%(subnet_id)s)",
{'ip_address': ip_address,
'network_id': network_id,
'subnet_id': subnet_id})
with context.session.begin(subtransactions=True):
for ipal in (context.session.query(models_v2.IPAllocation).
filter_by(network_id=network_id,
ip_address=ip_address,
subnet_id=subnet_id)):
context.session.delete(ipal)
@staticmethod
def _store_ip_allocation(context, ip_address, network_id, subnet_id,
port_id):
LOG.debug("Allocated IP %(ip_address)s "
"(%(network_id)s/%(subnet_id)s/%(port_id)s)",
{'ip_address': ip_address,
'network_id': network_id,
'subnet_id': subnet_id,
'port_id': port_id})
allocated = models_v2.IPAllocation(
network_id=network_id,
port_id=port_id,
ip_address=ip_address,
subnet_id=subnet_id
)
context.session.add(allocated)
# NOTE(kevinbenton): We add this to the session info so the sqlalchemy
# object isn't immediately garbage collected. Otherwise when the
# fixed_ips relationship is referenced a new persistent object will be
# added to the session that will interfere with retry operations.
# See bug 1556178 for details.
context.session.info.setdefault('allocated_ips', []).append(allocated)
def _make_subnet_dict(self, subnet, fields=None, context=None):
res = {'id': subnet['id'],
'name': subnet['name'],
'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': subnet['cidr'],
'subnetpool_id': subnet.get('subnetpool_id'),
'allocation_pools': [{'start': pool['first_ip'],
'end': pool['last_ip']}
for pool in subnet['allocation_pools']],
'gateway_ip': subnet['gateway_ip'],
'enable_dhcp': subnet['enable_dhcp'],
'ipv6_ra_mode': subnet['ipv6_ra_mode'],
'ipv6_address_mode': subnet['ipv6_address_mode'],
'dns_nameservers': [dns['address']
for dns in subnet['dns_nameservers']],
'host_routes': [{'destination': route['destination'],
'nexthop': route['nexthop']}
for route in subnet['routes']],
}
# The shared attribute for a subnet is the same as its parent network
res['shared'] = self._is_network_shared(context, subnet.networks)
# Call auxiliary extend functions, if any
self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
return self._fields(res, fields)
def _make_subnetpool_dict(self, subnetpool, fields=None):
default_prefixlen = str(subnetpool['default_prefixlen'])
min_prefixlen = str(subnetpool['min_prefixlen'])
max_prefixlen = str(subnetpool['max_prefixlen'])
res = {'id': subnetpool['id'],
'name': subnetpool['name'],
'tenant_id': subnetpool['tenant_id'],
'default_prefixlen': default_prefixlen,
'min_prefixlen': min_prefixlen,
'max_prefixlen': max_prefixlen,
'is_default': subnetpool['is_default'],
'shared': subnetpool['shared'],
'prefixes': [str(prefix)
for prefix in subnetpool['prefixes']],
'ip_version': subnetpool['ip_version'],
'default_quota': subnetpool['default_quota'],
'address_scope_id': subnetpool['address_scope_id']}
self._apply_dict_extend_functions(attributes.SUBNETPOOLS, res,
subnetpool)
return self._fields(res, fields)
def _make_port_dict(self, port, fields=None,
process_extensions=True):
res = {"id": port["id"],
'name': port['name'],
"network_id": port["network_id"],
'tenant_id': port['tenant_id'],
"mac_address": port["mac_address"],
"admin_state_up": port["admin_state_up"],
"status": port["status"],
"fixed_ips": [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in port["fixed_ips"]],
"device_id": port["device_id"],
"device_owner": port["device_owner"]}
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.PORTS, res, port)
return self._fields(res, fields)
def _get_network(self, context, id):
try:
network = self._get_by_id(context, models_v2.Network, id)
except exc.NoResultFound:
raise n_exc.NetworkNotFound(net_id=id)
return network
def _get_subnet(self, context, id):
try:
subnet = self._get_by_id(context, models_v2.Subnet, id)
except exc.NoResultFound:
raise n_exc.SubnetNotFound(subnet_id=id)
return subnet
def _get_subnetpool(self, context, id):
subnetpool = subnetpool_obj.SubnetPool.get_object(
context, id=id)
if not subnetpool:
raise exceptions.SubnetPoolNotFound(subnetpool_id=id)
return subnetpool
def _get_port(self, context, id):
try:
port = self._get_by_id(context, models_v2.Port, id)
except exc.NoResultFound:
raise n_exc.PortNotFound(port_id=id)
return port
def _get_dns_by_subnet(self, context, subnet_id):
return subnet_obj.DNSNameServer.get_objects(context,
subnet_id=subnet_id)
def _get_route_by_subnet(self, context, subnet_id):
route_qry = context.session.query(models_v2.SubnetRoute)
return route_qry.filter_by(subnet_id=subnet_id).all()
def _get_router_gw_ports_by_network(self, context, network_id):
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(network_id=network_id,
device_owner=constants.DEVICE_OWNER_ROUTER_GW).all()
def _get_subnets_by_network(self, context, network_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(network_id=network_id).all()
def _get_subnets_by_subnetpool(self, context, subnetpool_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all()
def _get_all_subnets(self, context):
# NOTE(salvatore-orlando): This query might end up putting
# a lot of stress on the db. Consider adding a cache layer
return context.session.query(models_v2.Subnet).all()
def _get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
make_subnet_dict = functools.partial(self._make_subnet_dict,
context=context)
return self._get_collection(context, models_v2.Subnet,
make_subnet_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _make_network_dict(self, network, fields=None,
process_extensions=True, context=None):
res = {'id': network['id'],
'name': network['name'],
'tenant_id': network['tenant_id'],
'admin_state_up': network['admin_state_up'],
'mtu': network.get('mtu', n_const.DEFAULT_NETWORK_MTU),
'status': network['status'],
'subnets': [subnet['id']
for subnet in network['subnets']]}
res['shared'] = self._is_network_shared(context, network)
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.NETWORKS, res, network)
return self._fields(res, fields)
def _is_network_shared(self, context, network):
# The shared attribute for a network now reflects if the network
# is shared to the calling tenant via an RBAC entry.
matches = ('*',) + ((context.tenant_id,) if context else ())
for entry in network.rbac_entries:
if (entry.action == 'access_as_shared' and
entry.target_tenant in matches):
return True
return False
def _make_subnet_args(self, detail, subnet, subnetpool_id):
gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None
args = {'tenant_id': detail.tenant_id,
'id': detail.subnet_id,
'name': subnet['name'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': str(detail.subnet_cidr),
'subnetpool_id': subnetpool_id,
'enable_dhcp': subnet['enable_dhcp'],
'gateway_ip': gateway_ip,
'description': subnet.get('description')}
if subnet['ip_version'] == 6 and subnet['enable_dhcp']:
if validators.is_attr_set(subnet['ipv6_ra_mode']):
args['ipv6_ra_mode'] = subnet['ipv6_ra_mode']
if validators.is_attr_set(subnet['ipv6_address_mode']):
args['ipv6_address_mode'] = subnet['ipv6_address_mode']
return args
def _make_fixed_ip_dict(self, ips):
# Excludes from dict all keys except subnet_id and ip_address
return [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in ips]
def _port_filter_hook(self, context, original_model, conditions):
# Apply the port filter only in non-admin and non-advsvc context
if self.model_query_scope(context, original_model):
conditions |= (
(context.tenant_id == models_v2.Network.tenant_id) &
(models_v2.Network.id == models_v2.Port.network_id))
return conditions
def _port_query_hook(self, context, original_model, query):
# we need to outerjoin to networks if the model query scope
# is necessary so we can filter based on network id. without
# this the conditions in the filter hook cause the networks
# table to be added to the FROM statement so we get lots of
# duplicated rows that break the COUNT operation
if self.model_query_scope(context, original_model):
query = query.outerjoin(models_v2.Network)
return query
| 43.497024
| 78
| 0.601984
|
b4d48dbc98e5eaf29b676ae84bed0910cfd4495b
| 149
|
py
|
Python
|
django/contrib/gis/geometry/backend/geos.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 2
|
2016-09-27T09:30:19.000Z
|
2016-10-17T01:47:43.000Z
|
env/lib/python2.7/site-packages/django/contrib/gis/geometry/backend/geos.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
env/lib/python2.7/site-packages/django/contrib/gis/geometry/backend/geos.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 1
|
2021-03-06T14:22:00.000Z
|
2021-03-06T14:22:00.000Z
|
from django.contrib.gis.geos import (
GEOSGeometry as Geometry, GEOSException as GeometryException)
__all__ = ['Geometry', 'GeometryException']
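# Typical (illustrative) usage of the re-exported names: Geometry('POINT(0 0)')
# builds a geometry from WKT, and GeometryException is the error type raised by
# the GEOS backend on invalid input.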
| 29.8
| 65
| 0.778523
|
0bc041db2dd0b1ce16f9c7b139bd9ab2dbc29343
| 145
|
py
|
Python
|
proxytest/__main__.py
|
yoleg/proxytest
|
63c85b9b14c35de72fce4542ae44080ee6082efb
|
[
"MIT"
] | 2
|
2020-01-09T14:42:50.000Z
|
2020-08-18T10:26:55.000Z
|
proxytest/__main__.py
|
yoleg/proxytest
|
63c85b9b14c35de72fce4542ae44080ee6082efb
|
[
"MIT"
] | null | null | null |
proxytest/__main__.py
|
yoleg/proxytest
|
63c85b9b14c35de72fce4542ae44080ee6082efb
|
[
"MIT"
] | null | null | null |
""" Entry point for calling with python -m proxytest ... """
import sys
from . import run_from_command_line
sys.exit(run_from_command_line())
| 18.125
| 60
| 0.744828
|
ce7919dc3311ee1057493444a01e3c55975e0b38
| 622
|
py
|
Python
|
newlines.py
|
LRjunior/klavir-zakladne-principy-hrania
|
490907a5d28b0d0e54525aad2dab4f3bff28b166
|
[
"CC0-1.0"
] | null | null | null |
newlines.py
|
LRjunior/klavir-zakladne-principy-hrania
|
490907a5d28b0d0e54525aad2dab4f3bff28b166
|
[
"CC0-1.0"
] | null | null | null |
newlines.py
|
LRjunior/klavir-zakladne-principy-hrania
|
490907a5d28b0d0e54525aad2dab4f3bff28b166
|
[
"CC0-1.0"
] | null | null | null |
import re
import codecs
filenamein = 'text.tex' #'zaklady_cvicenia_na_klaviri.tex'
filenameout = 'textout.tex'
fh = codecs.open(filenamein, 'r', 'utf-8')
data = fh.read()
fh.close()
lines = data.split("\n")
processed = ''
lineslen = len(lines)
par = False
for a in lines:
    b = a.rstrip()
    if len(b) > 0:
        if par:
            processed += ' ' + b
        else:
            processed += b
        par = True
    else:
        par = False
        processed += "\r\n\r\n"
f_out = codecs.open(filenameout, 'w', 'utf-8')
f_out.write(processed)
f_out.close()
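# Hedged illustration of the transformation performed above: for an input whose
# lines are "a", "b", "" and "c", the output becomes "a b\r\n\r\nc". Consecutive
# non-empty lines are joined with spaces into one paragraph and every blank line
# becomes a CRLF paragraph break (a trailing blank line yields a trailing break).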
| 17.277778
| 59
| 0.580386
|
e116f3bdf0917e2aeece1bc51499034c142b9bcc
| 30,854
|
py
|
Python
|
frame_2D_alg/alternative versions/comp_slice_flip.py
|
khanh93vn/CogAlg
|
b984c12316e266dd8f012dee90ce26bd604fbdd1
|
[
"MIT"
] | 11
|
2018-12-01T04:20:06.000Z
|
2021-05-18T08:43:51.000Z
|
frame_2D_alg/alternative versions/comp_slice_flip.py
|
khanh93vn/CogAlg
|
b984c12316e266dd8f012dee90ce26bd604fbdd1
|
[
"MIT"
] | null | null | null |
frame_2D_alg/alternative versions/comp_slice_flip.py
|
khanh93vn/CogAlg
|
b984c12316e266dd8f012dee90ce26bd604fbdd1
|
[
"MIT"
] | 3
|
2020-03-27T14:01:22.000Z
|
2021-07-16T13:54:56.000Z
|
'''
Comp_slice is a terminal fork of intra_blob.
-
It traces blob axis by cross-comparing vertically adjacent Ps: horizontal slices across an edge blob.
These low-M high-Ma blobs are vectorized into outlines of adjacent flat or high-M blobs.
(high match: M / Ma, roughly corresponds to low gradient: G / Ga)
-
Vectorization is clustering of Ps + their derivatives (derPs) into PPs: patterns of Ps that describe an edge.
This process is a reduced-dimensionality (2D->1D) version of cross-comp and clustering cycle, common across this project.
As we add higher dimensions (2D alg, 3D alg), this dimensionality reduction is done in salient high-aspect blobs
(likely edges / contours in 2D or surfaces in 3D) to form more compressed skeletal representations of full-D patterns.
-
Please see diagram:
https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/comp_slice_flip.drawio
'''
from collections import deque
import sys
import numpy as np
from class_cluster import ClusterStructure, NoneType
from slice_utils import draw_PP_
import warnings # to detect overflow issue, in case of infinity loop
warnings.filterwarnings('error')
ave = 30 # filter or hyper-parameter, set as a guess, latter adjusted by feedback, not needed here
aveG = 50 # filter for comp_g, assumed constant direction
flip_ave = .1
flip_ave_FPP = 0 # flip large FPPs only (change to 0 for debug purpose)
div_ave = 200
ave_dX = 10 # difference between median x coords of consecutive Ps
ave_Dx = 10
ave_mP = 8 # just a random number right now.
ave_rmP = .7 # the rate of mP decay per relative dX (x shift) = 1: initial form of distance
ave_ortho = 20
class CDert(ClusterStructure):
I = int
Dy = int
Dx = int
G = int
M = int
Dyy = int
Dyx = int
Dxy = int
Dxx = int
Ga = int
Ma = int
Mdx = int
Ddx = int
flip_val = int
class CP(ClusterStructure):
Dert = object # summed kernel parameters
L = int
x0 = int
dX = int # shift of average x between P and _P, if any
y = int # for visualization only
sign = NoneType # sign of gradient deviation
dert_ = list # array of pixel-level derts: (p, dy, dx, g, m), extended in intra_blob
upconnect_ = list
downconnect_cnt = int
derP = object # derP object reference
# only in Pd:
Pm = object # reference to root P
dxdert_ = list
# only in Pm:
Pd_ = list
class CderP(ClusterStructure):
## derDert
mP = int
dP = int
mx = int
dx = int
mL = int
dL = int
mDx = int
dDx = int
mDy = int
dDy = int
P = object # lower comparand
_P = object # higher comparand
PP = object # FPP if flip_val, contains this derP
fxflip = bool # flag: derP is a higher splicing | flipping point
_fxflip = bool # flag: derP is a lower splicing | flipping point
# from comp_dx
fdx = NoneType
# optional:
dDdx = int
mDdx = int
dMdx = int
mMdx = int
class CPP(ClusterStructure):
Dert = object # set of P params accumulated in PP
derPP = object # set of derP params accumulated in PP
# between PPs:
upconnect_ = list
downconnect_cnt = int
fPPm = NoneType # PPm if 1, else PPd; not needed if packed in PP_?
fdiv = NoneType
box = list # for visualization only, original box before flipping
xflip_derP_ = list # derPs at potential splicing points
xflip_PP_ = list # potentially spliced PPs in FPP
# FPP params
dert__ = list
mask__ = bool
# PP params
derP__ = list
P__ = list
# PP FPP params
derPf__ = list
Pf__ = list
PPmm_ = list
PPdm_ = list
PPmmf_ = list
PPdmf_ = list
# PPd params
derPd__ = list
Pd__ = list
# PPd FPP params
derPdf__ = list
Pdf__ = list
PPmd_ = list
PPdd_ = list
PPmdf_ = list
PPddf_ = list # comp_dx params
# Functions:
'''
leading '_' denotes higher-line variable or structure, vs. same-type lower-line variable or structure
trailing '_' denotes array name, vs. same-name elements of that array. '__' is a 2D array
leading 'f' denotes flag
-
rough workflow:
-
intra_blob -> slice_blob(blob) -> derP_ -> PP,
if flip_val(PP is FPP): pack FPP in blob.PP_ -> flip FPP.dert__ -> slice_blob(FPP) -> pack PP in FPP.PP_
else (PP is PP): pack PP in blob.PP_
'''
def slice_blob(blob, verbose=False):
'''
Slice_blob converts selected smooth-edge blobs (high G, low Ga) into sliced blobs,
adding horizontal blob slices: Ps or 1D patterns
'''
if not isinstance(blob, CPP): # input is blob, else FPP, no flipping
flip_eval_blob(blob)
dert__ = blob.dert__
mask__ = blob.mask__
height, width = dert__[0].shape
if verbose: print("Converting to image...")
for fPPd in range(2): # run twice, 1st loop fPPd=0: form PPs, 2nd loop fPPd=1: form PPds
P__ , derP__, Pd__, derPd__ = [], [], [], []
zip_dert__ = zip(*dert__)
_P_ = form_P_(list(zip(*next(zip_dert__))), mask__[0], 0) # 1st upper row
P__ += _P_ # frame of Ps
for y, dert_ in enumerate(zip_dert__, start=1): # scan top down
if verbose: print(f"\rProcessing line {y + 1}/{height}, ", end=""); sys.stdout.flush()
P_ = form_P_(list(zip(*dert_)), mask__[y], y) # horizontal clustering - lower row
derP_ = scan_P_(P_, _P_) # tests for x overlap between Ps, calls comp_slice
Pd_ = form_Pd_(P_) # form Pds within Ps
derPd_ = scan_Pd_(P_, _P_) # adds upconnect_ in Pds and calls derPd_2_PP_derPd_, same as derP_2_PP_
derP__ += derP_; derPd__ += derPd_ # frame of derPs
P__ += P_; Pd__ += Pd_
_P_ = P_ # set current lower row P_ as next upper row _P_
form_PP_shell(blob, derP__, P__, derPd__, Pd__, fPPd) # form PPs in blob or in FPP
# draw PPs and FPPs
if not isinstance(blob, CPP):
draw_PP_(blob)
def form_P_(idert_, mask_, y): # segment dert__ into P__, in horizontal ) vertical order
'''
sums dert params within Ps and increments L: horizontal length.
'''
P_ = [] # rows of derPs
dert_ = [list(idert_[0])] # get first dert from idert_ (generator/iterator)
_mask = mask_[0] # mask bit per dert
if ~_mask:
I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert_[0]; L = 1; x0 = 0 # initialize P params with first dert
for x, dert in enumerate(idert_[1:], start=1): # left to right in each row of derts
mask = mask_[x] # pixel mask
if mask: # masks: if 1,_0: P termination, if 0,_1: P initialization, if 0,_0: P accumulation:
if ~_mask: # _dert is not masked, dert is masked, terminate P:
P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma), L=L, x0=x0, dert_=dert_, y=y)
P_.append(P)
else: # dert is not masked
if _mask: # _dert is masked, initialize P params:
I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert; L = 1; x0 = x; dert_ = [dert]
else:
I += dert[0] # _dert is not masked, accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert
Dy += dert[1]
Dx += dert[2]
G += dert[3]
M += dert[4]
Dyy += dert[5]
Dyx += dert[6]
Dxy += dert[7]
Dxx += dert[8]
Ga += dert[9]
Ma += dert[10]
L += 1
dert_.append(dert)
_mask = mask
if ~_mask: # terminate last P in a row
P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma), L=L, x0=x0, dert_=dert_, y=y)
P_.append(P)
return P_
def form_Pd_(P_):
'''
form Pd s across P's derts using Dx sign
'''
Pd__ = []
for iP in P_:
if (iP.downconnect_cnt>0) or (iP.upconnect_): # form Pd s if at least one connect in P, else they won't be compared
P_Ddx = 0 # sum of Ddx across Pd s
P_Mdx = 0 # sum of Mdx across Pd s
Pd_ = [] # Pds in P
_dert = iP.dert_[0] # 1st dert
dert_ = [_dert]
I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = _dert; L = 1; x0 = iP.x0 # initialize P params with first dert
_sign = _dert[2] > 0
x = 1 # relative x within P
for dert in iP.dert_[1:]:
sign = dert[2] > 0
if sign == _sign: # same Dx sign
I += dert[0] # accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert
Dy += dert[1]
Dx += dert[2]
G += dert[3]
M += dert[4]
Dyy += dert[5]
Dyx += dert[6]
Dxy += dert[7]
Dxx += dert[8]
Ga += dert[9]
Ma += dert[10]
L += 1
dert_.append(dert)
else: # sign change, terminate P
P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma),
L=L, x0=x0, dert_=dert_, y=iP.y, sign=_sign, Pm=iP)
if Dx > ave_Dx:
# cross-comp of dx in P.dert_
comp_dx(P); P_Ddx += P.Dert.Ddx; P_Mdx += P.Dert.Mdx
Pd_.append(P)
# reinitialize params
I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert; x0 = iP.x0+x; L = 1; dert_ = [dert]
_sign = sign
x += 1
# terminate last P
P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma),
L=L, x0=x0, dert_=dert_, y=iP.y, sign=_sign, Pm=iP)
if Dx > ave_Dx:
comp_dx(P); P_Ddx += P.Dert.Ddx; P_Mdx += P.Dert.Mdx
Pd_.append(P)
# update Pd params in P
iP.Pd_ = Pd_; iP.Dert.Ddx = P_Ddx; iP.Dert.Mdx = P_Mdx
Pd__ += Pd_
return Pd__
def scan_P_(P_, _P_): # test for x overlap between Ps, call comp_slice
derP_ = []
for P in P_: # lower row
for _P in _P_: # upper row
# test for x overlap between P and _P in 8 directions
if (P.x0 - 1 < (_P.x0 + _P.L) and (P.x0 + P.L) + 1 > _P.x0): # all Ps here are positive
fcomp = [1 for derP in P.upconnect_ if P is derP.P] # upconnect could be derP or dirP
if not fcomp:
derP = comp_slice_full(_P, P) # form vertical and directional derivatives
derP_.append(derP)
P.upconnect_.append(derP)
_P.downconnect_cnt += 1
elif (P.x0 + P.L) < _P.x0: # stop scanning the rest of lower P_ if there is no overlap
break
return derP_
def scan_Pd_(P_, _P_): # test for x overlap between Pds
derPd_ = []
for P in P_: # lower row
for _P in _P_: # upper row
for Pd in P.Pd_: # lower row Pds
for _Pd in _P.Pd_: # upper row Pds
# test for same sign & x overlap between Pd and _Pd in 8 directions
if (Pd.x0 - 1 < (_Pd.x0 + _Pd.L) and (Pd.x0 + Pd.L) + 1 > _Pd.x0) and (Pd.sign == _Pd.sign):
fcomp = [1 for derPd in Pd.upconnect_ if Pd is derPd.P] # upconnect could be derP or dirP
if not fcomp:
derPd = comp_slice_full(_Pd, Pd)
derPd_.append(derPd)
Pd.upconnect_.append(derPd)
_Pd.downconnect_cnt += 1
elif (Pd.x0 + Pd.L) < _Pd.x0: # stop scanning the rest of lower P_ if there is no overlap
break
return derPd_
def form_PP_shell(blob, derP__, P__, derPd__, Pd__, fPPd):
'''
form vertically contiguous patterns of patterns by the sign of derP, in blob or in FPP
'''
if not isinstance(blob, CPP): # input is blob
blob.derP__ = derP__; blob.P__ = P__
blob.derPd__ = derPd__; blob.Pd__ = Pd__
if fPPd:
derP_2_PP_(blob.derP__, blob.PPdm_, 1, 1) # cluster by derPm dP sign
derP_2_PP_(blob.derPd__, blob.PPdd_, 1, 1) # cluster by derPd dP sign
else:
derP_2_PP_(blob.derP__, blob.PPmm_, 1, 0) # cluster by derPm mP sign
derP_2_PP_(blob.derPd__, blob.PPmd_, 1, 0) # cluster by derPd mP sign
# assign spliced_PP after forming all PPs and FPPs
PPs_ = [blob.PPdm_, blob.PPdd_, blob.PPmm_, blob.PPmd_]
for PP_ in PPs_:
for PP in PP_: # splice FPP with connected PPs:
for derP in PP.xflip_derP_: # not empty in FPPs bordering on PPs only
_P = derP._P
'''
if (derP._fxflip) and isinstance(_P.derP, CderP) and (_P.derP.PP not in PP.xflip_PP_):
PP.xflip_PP_.append(_P.derP.PP) # derP is a lower splice point
if ((derP.fxflip) or (derP._fxflip)) and (derP.PP not in PP.xflip_PP_):
PP.xflip_PP_.append(derP.PP) # derP is a higher splice point
'''
# current PP is FPP, add spliced derP'PPs to current FPP
if (derP.P.Dert.flip_val >0) and (derP.mP>0):
if (derP.PP not in PP.xflip_PP_):
PP.xflip_PP_.append(derP.PP)
if isinstance(_P.derP, CderP) and (_P.derP.PP not in PP.xflip_PP_):
PP.xflip_PP_.append(_P.derP.PP)
# current PP is not FPP, add current PP to _P.PP (_P.PP is FPP)
else:
if isinstance(_P.derP, CderP) and (derP.PP not in derP._P.derP.PP.xflip_PP_):
derP._P.derP.PP.xflip_PP_.append(derP.PP)
else:
FPP = blob # reassign for clarity
FPP.derPf__ = derP__; FPP.Pf__ = P__
FPP.derPdf__ = derPd__; FPP.Pdf__ = Pd__
if fPPd:
derP_2_PP_(FPP.derPf__, FPP.PPdmf_, 0, 1) # cluster by derPmf dP sign
derP_2_PP_(FPP.derPdf__, FPP.PPddf_, 0, 1) # cluster by derPdf dP sign
else:
derP_2_PP_(FPP.derPf__, FPP.PPmmf_, 0, 0) # cluster by derPmf mP sign
derP_2_PP_(FPP.derPdf__, FPP.PPmdf_, 0, 0) # cluster by derPdf mP sign
def derP_2_PP_(derP_, PP_, fflip, fPPd):
'''
first row of derP_ has downconnect_cnt == 0, higher rows may also have them
'''
for derP in reversed(derP_): # bottom-up to follow upconnects, derP is stored top-down
if not derP.P.downconnect_cnt and not isinstance(derP.PP, CPP): # root derP was not terminated in prior call
PP = CPP(Dert=CDert(), derPP=CderP()) # init
accum_PP(PP,derP)
if derP._P.upconnect_: # derP has upconnects
upconnect_2_PP_(derP, PP_, fflip, fPPd) # form PPs across _P upconnects
else:
if (derP.PP.Dert.flip_val > flip_ave_FPP) and fflip:
flip_FPP(derP.PP)
PP_.append(derP.PP)
def upconnect_2_PP_(iderP, PP_, fflip, fPPd):
'''
compare sign of lower-layer iderP to the sign of its upconnects to form contiguous same-sign PPs
'''
confirmed_upconnect_ = []
for derP in iderP._P.upconnect_: # potential upconnects from previous call
if derP not in iderP.PP.derP__: # derP should not in current iPP derP_ list, but this may occur after the PP merging
if (derP.P.Dert.flip_val>0 and iderP.P.Dert.flip_val>0 and iderP.PP.Dert.flip_val>0):
# upconnect derP has different FPP, merge them
if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):
merge_PP(iderP.PP, derP.PP, PP_)
else: # accumulate derP to current FPP
accum_PP(iderP.PP, derP)
confirmed_upconnect_.append(derP)
# not FPP
else:
if fPPd: same_sign = (iderP.dP > 0) == (derP.dP > 0) # comp dP sign
else: same_sign = (iderP.mP > 0) == (derP.mP > 0) # comp mP sign
if same_sign and not (iderP.P.Dert.flip_val>0) and not (derP.P.Dert.flip_val>0): # upconnect derP has different PP, merge them
if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):
merge_PP(iderP.PP, derP.PP, PP_)
else: # accumulate derP in current PP
accum_PP(iderP.PP, derP)
confirmed_upconnect_.append(derP)
elif not isinstance(derP.PP, CPP): # sign changed, derP is root derP unless it already has FPP/PP
PP = CPP(Dert=CDert(), derPP=CderP())
accum_PP(PP,derP)
derP.P.downconnect_cnt = 0 # reset downconnect count for root derP
if derP._P.upconnect_:
upconnect_2_PP_(derP, PP_, fflip, fPPd) # recursive compare sign of next-layer upconnects
elif derP.PP is not iderP.PP and derP.P.downconnect_cnt == 0:
if (derP.PP.Dert.flip_val > flip_ave_FPP) and fflip:
flip_FPP(derP.PP)
PP_.append(derP.PP) # terminate PP (not iPP) at the sign change
iderP._P.upconnect_ = confirmed_upconnect_
if not iderP.P.downconnect_cnt:
if (iderP.PP.Dert.flip_val > flip_ave_FPP) and fflip:
flip_FPP(iderP.PP)
PP_.append(iderP.PP) # iPP is terminated after all upconnects are checked
def merge_PP(_PP, PP, PP_): # merge PP into _PP
for derP in PP.derP__:
if derP not in _PP.derP__:
_PP.derP__.append(derP)
derP.PP = _PP # update reference
Dert = derP.P.Dert
# accumulate Dert param of derP
_PP.Dert.accumulate(I=Dert.I, Dy=Dert.Dy, Dx=Dert.Dx, G=Dert.G, M=Dert.M, Dyy=Dert.Dyy, Dyx=Dert.Dyx, Dxy=Dert.Dxy, Dxx=Dert.Dxx,
Ga=Dert.Ga, Ma=Dert.Ma, Mdx=Dert.Mdx, Ddx=Dert.Ddx, flip_val=Dert.flip_val)
# accumulate if PP' derP not in _PP
_PP.derPP.accumulate(mP=derP.mP, dP=derP.dP, mx=derP.mx, dx=derP.dx,
mL=derP.mL, dL=derP.dL, mDx=derP.mDx, dDx=derP.dDx,
mDy=derP.mDy, dDy=derP.dDy)
for splice_derP in PP.xflip_derP_:
if splice_derP not in _PP.xflip_derP_:
_PP.xflip_derP_.append(splice_derP)
if PP in PP_:
PP_.remove(PP) # remove merged PP
def flip_FPP(FPP):
'''
flip derts of FPP and call again slice_blob to get PPs of FPP
'''
# get box from P and P
x0 = min(min([derP.P.x0 for derP in FPP.derP__]), min([derP._P.x0 for derP in FPP.derP__]))
xn = max(max([derP.P.x0+derP.P.L for derP in FPP.derP__]), max([derP._P.x0+derP._P.L for derP in FPP.derP__]))
y0 = min(min([derP.P.y for derP in FPP.derP__]), min([derP._P.y for derP in FPP.derP__]))
yn = max(max([derP.P.y for derP in FPP.derP__]), max([derP._P.y for derP in FPP.derP__])) +1 # +1 because yn is not inclusive
FPP.box = [y0,yn,x0,xn]
# init empty derts, 11 params each: p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma
dert__ = [np.zeros((yn-y0, xn-x0)) for _ in range(11)]
mask__ = np.ones((yn-y0, xn-x0)).astype('bool')
# fill empty dert with current FPP derts
for derP in FPP.derP__:
# _P
for _x, _dert in enumerate(derP._P.dert_):
for i, _param in enumerate(_dert):
dert__[i][derP._P.y-y0, derP._P.x0-x0+_x] = _param
mask__[derP._P.y-y0, derP._P.x0-x0+_x] = False
# P
for x, dert in enumerate(derP.P.dert_):
for j, param in enumerate(dert):
dert__[j][derP.P.y-y0, derP.P.x0-x0+x] = param
mask__[derP.P.y-y0, derP.P.x0-x0+x] = False
# flip dert__
flipped_dert__ = [np.rot90(dert) for dert in dert__]
flipped_mask__ = np.rot90(mask__)
flipped_dert__[1],flipped_dert__[2] = \
flipped_dert__[2],flipped_dert__[1] # swap dy and dx in derts, always flipped in FPP
FPP.dert__ = flipped_dert__
FPP.mask__ = flipped_mask__
# form PP_ in flipped FPP
slice_blob(FPP, verbose=True)
def flip_eval_blob(blob):
# L_bias (Lx / Ly) * G_bias (Gy / Gx), blob.box = [y0,yn,x0,xn], ddirection: preferential comp over low G
horizontal_bias = (blob.box[3] - blob.box[2]) / (blob.box[1] - blob.box[0]) \
* (abs(blob.Dy) / abs(blob.Dx))
if horizontal_bias > 1 and (blob.G * blob.Ma * horizontal_bias > flip_ave / 10):
blob.fflip = 1 # rotate 90 degrees for scanning in vertical direction
# swap blob Dy and Dx:
Dy=blob.Dy; blob.Dy = blob.Dx; blob.Dx = Dy
# rotate dert__:
blob.dert__ = tuple([np.rot90(dert) for dert in blob.dert__])
blob.mask__ = np.rot90(blob.mask__)
# swap dert dys and dxs:
blob.dert__ = list(blob.dert__) # convert to list since param in tuple is immutable
blob.dert__[1], blob.dert__[2] = \
blob.dert__[2], blob.dert__[1]
def accum_Dert(Dert: dict, **params) -> None:
Dert.update({param: Dert[param] + value for param, value in params.items()})
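# e.g. (hedged doctest-style illustration): for d = {'I': 1, 'G': 2},
# accum_Dert(d, I=3, G=4) updates d in place to {'I': 4, 'G': 6}.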
def accum_PP(PP, derP): # accumulate derP params in PP
Dert = derP.P.Dert
# accumulate Dert params
''' use:
for param, PP_param in zip(Dert, PP.Dert):
PP_param+=param
? '''
PP.Dert.accumulate(I=Dert.I, Dy=Dert.Dy, Dx=Dert.Dx, G=Dert.G, M=Dert.M, Dyy=Dert.Dyy, Dyx=Dert.Dyx, Dxy=Dert.Dxy, Dxx=Dert.Dxx,
Ga=Dert.Ga, Ma=Dert.Ma, Mdx=Dert.Mdx, Ddx=Dert.Ddx, flip_val=Dert.flip_val)
# accumulate derP params
PP.derPP.accumulate(mP=derP.mP, dP=derP.dP, mx=derP.mx, dx=derP.dx, mL=derP.mL, dL=derP.dL, mDx=derP.mDx, dDx=derP.dDx,
mDy=derP.mDy, dDy=derP.dDy)
PP.derP__.append(derP)
derP.PP = PP # update reference
if derP.fxflip or derP._fxflip: # add splice point
PP.xflip_derP_.append(derP)
def comp_dx(P): # cross-comp of dx s in P.dert_
Ddx = 0
Mdx = 0
dxdert_ = []
_dx = P.dert_[0][2] # first dx
for dert in P.dert_[1:]:
dx = dert[2]
ddx = dx - _dx
        if (dx > 0) == (_dx > 0): mdx = min(dx, _dx)
else: mdx = -min(abs(dx), abs(_dx))
dxdert_.append((ddx, mdx)) # no dx: already in dert_
Ddx += ddx # P-wide cross-sign, P.L is too short to form sub_Ps
Mdx += mdx
_dx = dx
P.dxdert_ = dxdert_
P.Dert.Ddx = Ddx
P.Dert.Mdx = Mdx
def comp_slice(_P, P, _derP_): # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp
s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dert.Dx, P.Dert.Dy, P.Dert.G, P.Dert.M, P.L, P.Dert.Ddx, P.Dert.Mdx # params per comp branch
_s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dert.Dx, _P.Dert.Dy, _P.Dert.G, _P.Dert.M, _P.dX, _P.L, _P.Dert.Ddx, _P.Dert.Mdx
dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2) # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
ddX = dX - _dX # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
mdX = min(dX, _dX) # dX is inversely predictive of mP?
hyp = np.hypot(dX, 1) # ratio of local segment of long (vertical) axis to dY = 1
L /= hyp # orthogonal L is reduced by hyp
dL = L - _L; mL = min(L, _L) # L: positions / sign, dderived: magnitude-proportional value
M /= hyp # orthogonal M is reduced by hyp
dM = M - _M; mM = min(M, _M) # use abs M? no Mx, My: non-core, lesser and redundant bias?
dP = dL + dM # -> directional PPd, equal-weight params, no rdn?
mP = mL + mM # -> complementary PPm, rdn *= Pd | Pm rolp?
mP -= ave_mP * ave_rmP ** (dX / L) # dX / L is relative x-distance between P and _P,
P.Dert.flip_val = (dX * (P.Dert.Dy / (P.Dert.Dx+.001)) - flip_ave) # +.001 to avoid division by zero
derP = CderP(P=P, _P=_P, mP=mP, dP=dP, dX=dX, mL=mL, dL=dL)
P.derP = derP
if P.Dert.flip_val>0: # derP.PP is FPP and _derP.PP is PP
        if not (_P.Dert.flip_val>0) and (derP.mP >0) and (isinstance(_P.derP, CderP)): # positive mP AND _P.derP is derP: exclude 1st row Ps
derP.fxflip = 1 # derP is a lower splice point
elif _P.Dert.flip_val>0: # derP.PP is PP and _derP.PP is FPP
if (_P.derP.mP >0) and (isinstance(_P.derP, CderP)): # positive mP AND _P.derP is derP: exclude 1st row Ps
_P.derP._fxflip = 1 # _derP is a higher splice point
return derP
def comp_slice_full(_P, P): # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp
s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dert.Dx, P.Dert.Dy, P.Dert.G, P.Dert.M, P.L, P.Dert.Ddx, P.Dert.Mdx
# params per comp branch, add angle params
_s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dert.Dx, _P.Dert.Dy, _P.Dert.G, _P.Dert.M, _P.dX, _P.L, _P.Dert.Ddx, _P.Dert.Mdx
dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2) # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
if dX > ave_dX: # internal comp is higher-power, else two-input comp not compressive?
xn = x0 + L - 1
_xn = _x0 + _L - 1
mX = min(xn, _xn) - max(x0, _x0) # overlap = abs proximity: summed binary x match
rX = dX / mX if mX else dX*2 # average dist / prox, | prox / dist, | mX / max_L?
ddX = dX - _dX # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
mdX = min(dX, _dX) # dX is inversely predictive of mP?
if dX * P.Dert.G > ave_ortho: # estimate params of P locally orthogonal to long axis, maximizing lateral diff and vertical match
# diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/orthogonalization.png
# Long axis is a curve of connections between ave_xs: mid-points of consecutive Ps.
# Ortho virtually rotates P to connection-orthogonal direction:
hyp = np.hypot(dX, 1) # ratio of local segment of long (vertical) axis to dY = 1
L = L / hyp # orthogonal L
# combine derivatives in proportion to the contribution of their axes to orthogonal axes:
# contribution of Dx should increase with hyp(dX,dY=1), this is original direction of Dx:
Dy = (Dy / hyp + Dx * hyp) / 2 # estimated along-axis D
Dx = (Dy * hyp + Dx / hyp) / 2 # estimated cross-axis D
'''
alternatives:
oDy = (Dy * hyp - Dx / hyp) / 2; oDx = (Dx / hyp + Dy * hyp) / 2; or:
oDy = hypot( Dy / hyp, Dx * hyp); oDx = hypot( Dy * hyp, Dx / hyp)
'''
dL = L - _L; mL = min(L, _L) # L: positions / sign, dderived: magnitude-proportional value
dM = M - _M; mM = min(M, _M) # use abs M? no Mx, My: non-core, lesser and redundant bias?
# no comp G: Dy, Dx are more specific:
dDx = Dx - _Dx # same-sign Dx if Pd
mDx = min(abs(Dx), abs(_Dx))
    if (Dx > 0) != (_Dx > 0): mDx = -mDx
# min is value distance for opposite-sign comparands, vs. value overlap for same-sign comparands
dDy = Dy - _Dy # Dy per sub_P by intra_comp(dx), vs. less vertically specific dI
mDy = min(abs(Dy), abs(_Dy))
if (Dy > 0) != (_Dy > 0): mDy = -mDy
dDdx, dMdx, mDdx, mMdx = 0, 0, 0, 0
if P.dxdert_ and _P.dxdert_: # from comp_dx
fdx = 1
dDdx = Ddx - _Ddx
mDdx = min( abs(Ddx), abs(_Ddx))
if (Ddx > 0) != (_Ddx > 0): mDdx = -mDdx
# Mdx is signed:
dMdx = min( Mdx, _Mdx)
mMdx = -min( abs(Mdx), abs(_Mdx))
if (Mdx > 0) != (_Mdx > 0): mMdx = -mMdx
else:
fdx = 0
# coeff = 0.7 for semi redundant parameters, 0.5 for fully redundant parameters:
dP = ddX + dL + 0.7*(dM + dDx + dDy) # -> directional PPd, equal-weight params, no rdn?
# correlation: dX -> L, oDy, !oDx, ddX -> dL, odDy ! odDx? dL -> dDx, dDy?
if fdx: dP += 0.7*(dDdx + dMdx)
mP = mdX + mL + 0.7*(mM + mDx + mDy) # -> complementary PPm, rdn *= Pd | Pm rolp?
if fdx: mP += 0.7*(mDdx + mMdx)
mP -= ave_mP * ave_rmP ** (dX / L) # dX / L is relative x-distance between P and _P,
P.Dert.flip_val = (dX * (P.Dert.Dy / (P.Dert.Dx+.001)) - flip_ave) # avoid division by zero
derP = CderP(P=P, _P=_P, mP=mP, dP=dP, dX=dX, mL=mL, dL=dL, mDx=mDx, dDx=dDx, mDy=mDy, dDy=dDy)
P.derP = derP
# Chee's version:
# if flip value>0 AND positive mP (predictive value) AND flip_val sign changed AND _P.derP is derP: exclude 1st row Ps
if (P.Dert.flip_val>0) and (derP.mP >0) and ((P.Dert.flip_val>0) != (_P.Dert.flip_val>0)):
derP._fxflip = 1 # derP is lower splice point (start of FPP), _derP is not FPP
# if upper row is FPP , add current lower row derP as lower spliced point if lower row flip_val <0 or mP <0
elif (isinstance(_P.derP, CderP)) and (_P.Dert.flip_val>0) and (_P.derP.mP >0) and (not (P.Dert.flip_val>0) or not (derP.mP >0)):
derP.fxflip= 1 # _derP is higher splice point (end of FPP), derP is not FPP
if fdx:
derP.fdx=1; derP.dDdx=dDdx; derP.mDdx=mDdx; derP.dMdx=dMdx; derP.mMdx=mMdx
'''
min comp for rotation: L, Dy, Dx, no redundancy?
mParam weighting by relative contribution to mP, /= redundancy?
div_f, nvars: if abs dP per PPd, primary comp L, the rest is normalized?
'''
return derP
''' radial comp extension for co-internal blobs:
!= sign comp x sum( adj_blob_) -> intra_comp value, isolation value, cross-sign merge if weak, else:
== sign comp x ind( adj_adj_blob_) -> same-sign merge | composition:
borrow = adj_G * rA: default sum div_comp S -> relative area and distance to adjj_blob_
internal sum comp if mA: in thin lines only? comp_norm_G or div_comp_G -> rG?
isolation = decay + contrast:
G - G * (rA * ave_rG: decay) - (rA * adj_G: contrast, = lend | borrow, no need to compare vG?)
if isolation: cross adjj_blob composition eval,
else: cross adjj_blob merge eval:
blob merger if internal match (~raG) - isolation, rdn external match:
blob compos if external match (~rA?) + isolation,
Also eval comp_slice over fork_?
rng+ should preserve resolution: rng+_dert_ is dert layers,
rng_sum-> rng+, der+: whole rng, rng_incr-> angle / past vs next g,
rdn Rng | rng_ eval at rng term, Rng -= lost coord bits mag, always > discr?
'''
| 43.579096
| 152
| 0.579568
|
36fa7b701218df7bb6adcf9c47c96f40ba72b2b6
| 1,872
|
py
|
Python
|
optimisation/optimise_interface.py
|
kounelisagis/kesko-food-waste-hackathon
|
6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d
|
[
"MIT"
] | 1
|
2019-12-29T16:16:54.000Z
|
2019-12-29T16:16:54.000Z
|
optimisation/optimise_interface.py
|
kounelisagis/kesko-food-waste-hackathon
|
6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d
|
[
"MIT"
] | 14
|
2019-11-16T18:27:51.000Z
|
2022-02-26T20:17:01.000Z
|
optimisation/optimise_interface.py
|
kounelisagis/kesko-food-waste-hackathon
|
6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d
|
[
"MIT"
] | 8
|
2019-11-15T20:27:32.000Z
|
2020-08-26T16:21:48.000Z
|
import json
import os
from kesko_food_waste import settings
from optimisation import optimise
from optimisation.utils import get_geodesic_distance, get_market_coordinates
def get_some_ean():
items_json_filename = os.path.join(settings.PRIVATE_DATA_ROOT, "products_all.json")
with open(items_json_filename) as items_json_file:
items_json_data = json.load(items_json_file)
return [item["ean"] for item in items_json_data[:15]]
def get_ranked_markets_interface(ean_items_list, user_position, max_time=None):
data_market_id_item_ean_filename = os.path.join(settings.PRIVATE_DATA_ROOT, "data_market_id_item_ean_all.json")
with open(data_market_id_item_ean_filename) as data_market_id_item_ean_file:
data_market_id_item_ean = json.load(data_market_id_item_ean_file)
    # Only include the markets in the neighbourhood, widen the radius only if very few are found
    max_distance = 25
    while True:
        close_markets_list = [market for market in data_market_id_item_ean if
                              get_geodesic_distance(*user_position, *get_market_coordinates(market)) < max_distance]
        if len(close_markets_list) >= 5 or len(close_markets_list) == len(data_market_id_item_ean):
            break
        max_distance += 10
best_rank, best_rank_costs = optimise.get_best_ranked_markets(
market_list=close_markets_list,
items_list=ean_items_list,
user_position=user_position,
distance_weight=500,
completeness_weight=10,
threshold_cost=None,
max_iterations=500,
max_survival_probability=0.9,
population_max_size=12,
retain_parents=True,
max_time=max_time
)
return best_rank, best_rank_costs,
if __name__ == "__main__":
print(json.dumps(get_ranked_markets_interface(ean_items_list=get_some_ean(), user_position=(60.1618222, 24.737745), max_time=1),
indent=4))
| 37.44
| 132
| 0.725427
|
5fe8492ca79c800ff579828eb4b2e7f68beabdf6
| 7,673
|
py
|
Python
|
debruijngraph.py
|
wuyuMk7/CSCI5481
|
b21b7f4e5851aee93ca81728733cd6b44d718e09
|
[
"MIT"
] | null | null | null |
debruijngraph.py
|
wuyuMk7/CSCI5481
|
b21b7f4e5851aee93ca81728733cd6b44d718e09
|
[
"MIT"
] | null | null | null |
debruijngraph.py
|
wuyuMk7/CSCI5481
|
b21b7f4e5851aee93ca81728733cd6b44d718e09
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
sys.setrecursionlimit(2000000)
CUTOFF_THRESHOLD = 3  # k-mers seen at most this many times are skipped as likely errors when building the graph
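# pairSeq returns the reverse complement of a DNA sequence, e.g. pairSeq("ACCG") -> "CGGT".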
def pairSeq(seq):
base_pairs = { 'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C' }
pair = list(map(lambda x: base_pairs[x], seq))
pair.reverse()
return ''.join(pair)
def readFile(seq_file):
seqs = []
with open(seq_file, 'r') as fp:
for line in fp:
seqs.append(line.strip())
return seqs
class Graph:
def __init__(self):
self.nodes = {}
self.nodes_label_to_id = {}
self.edges = {}
self.node_id = 0
def __len__(self):
return len(self.nodes)
def addNode(self, label):
if label not in self.nodes_label_to_id:
self.nodes[self.node_id] = Node(self.node_id, label)
self.nodes_label_to_id[label] = self.node_id
self.node_id += 1
return self.nodes[self.nodes_label_to_id[label]]
def addEdge(self, src, tar, label):
if label not in self.edges:
self.edges[label] = Edge(src, tar, label)
self.nodes[src].addOutEdge(tar, self.edges[label])
self.nodes[tar].addInEdge(src, self.edges[label])
else:
self.edges[label].weights += 1
self.edges[label].stored_weights += 1
return self.edges[label]
def restoreEdgeWeights(self):
for label in self.edges:
self.edges[label].weights = self.edges[label].stored_weights
def getEdgeByLabel(self, label):
return self.edges[label] if label in self.edges else None
def getOutEdgesOfNode(self, node):
return self.nodes[node].outedges if node in self.nodes else None
    def getInEdgesOfNode(self, node):
return self.nodes[node].inedges if node in self.nodes else None
def mergeNodes(self, node1, node2):
pass
class Node:
def __init__(self, _id, label):
self.id = _id
self.label = label
self.skip = False
self.inedges = {}
self.outedges = {}
self.visited = False
def addInEdge(self, src, edge):
if src not in self.inedges:
self.inedges[src] = edge
return self.inedges[src]
def addOutEdge(self, tar, edge):
if tar not in self.outedges:
self.outedges[tar] = edge
return self.outedges[tar]
class Edge:
def __init__(self, src_id, tar_id, label, step=''):
self.src_id = src_id
self.tar_id = tar_id
self.label = label
self.visited = False
self.weights = 1
self.stored_weights = 1
if step == '':
self.step = label[-1]
else:
self.step = step
class DeBruijngraph:
def __init__(self, seqs, k=3):
self.seqs = seqs
self.k = k
self.kmers = None
self.kmers_cnt = None
self.graph = Graph()
def getKmers(self):
kmers_list = []
kmers_cnt = {}
for seq_i in range(len(self.seqs)):
seq = self.seqs[seq_i]
for i in range(len(seq)-self.k+1):
kmer = seq[i:i+self.k]
kmers_list.append(kmer)
if kmer in kmers_cnt:
kmers_cnt[kmer] += 1
else:
kmers_cnt[kmer] = 1
self.kmers = kmers_list
self.kmers_cnt = kmers_cnt
def buildGraph(self):
if not self.kmers:
self.getKmers()
for kmer in self.kmers:
# To be changed
if self.kmers_cnt[kmer] <= CUTOFF_THRESHOLD:
continue
init, tail = kmer[:self.k-1], kmer[1:]
init_node = self.graph.addNode(init)
tail_node = self.graph.addNode(tail)
self.graph.addEdge(init_node.id, tail_node.id, kmer)
def simplifyGraph(self):
if not self.graph:
self.buildGraph()
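    # Walks unused edges depth-first, decrementing edge weights, until every edge
    # has been consumed; each walk becomes one path (an Euler-style traversal).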
def performEularianWalk(self):
def dfs(node, path):
for tar_id in node.outedges:
if node.outedges[tar_id].weights > 0:
node.outedges[tar_id].weights -= 1
path.append(node.outedges[tar_id])
dfs(self.graph.nodes[tar_id], path)
paths = []
while True:
start_node = None
for edge_label in self.graph.edges:
if self.graph.edges[edge_label].weights > 0:
start_node = self.graph.edges[edge_label].src_id
break
if start_node is None:
break
path = []
dfs(self.graph.nodes[start_node], path)
paths.append(path)
self.graph.restoreEdgeWeights()
return paths
def assemble(self, paths):
paths_assembled = set()
#max_len = 0
for path in paths:
cur_seq = ''
for i in range(len(path)):
if i == 0:
cur_seq = path[i].label
else:
cur_seq += path[i].step
paths_assembled.add(cur_seq)
#if pairSeq(cur_seq) not in paths_assembled:
# paths_assembled.add(cur_seq)
#if len(cur_seq) > max_len:
# max_len = len(cur_seq)
# print(cur_seq)
return list(paths_assembled)
def genAssembly(self):
paths = self.performEularianWalk()
self.savePathToFile(paths)
paths_assembled = self.assemble(paths)
paths_assembled.sort(key=lambda path: len(path), reverse=True)
self.saveAssemblyToFile(paths_assembled)
self.saveAssemblyToFasta(paths_assembled)
def buildDeBruijnGraph(self):
self.buildGraph()
self.simplifyGraph()
def saveGraphToFile(self, nodefile="test.nodes", edgefile="test.edges"):
with open(nodefile,'w') as fp:
for key in self.graph.nodes:
fp.write('{}\t{}\n'.format(self.graph.nodes[key].id, self.graph.nodes[key].label))
with open(edgefile, 'w') as fp:
for key in self.graph.edges:
fp.write('{}\t{}\t{}\n'.format(self.graph.edges[key].src_id, self.graph.edges[key].tar_id, self.graph.edges[key].weights))
def saveDGraphToFile(self, filename="test.txt"):
pass
def savePathToFile(self, paths, pathfile="test.path"):
with open(pathfile, 'w') as fp:
for i in range(len(paths)):
fp.write('Path #{}\n'.format(i))
for edge in paths[i]:
fp.write('{}\t{}\t{}\t{}\n'.format(edge.src_id, edge.tar_id, edge.label, edge.step))
fp.write('\n\n')
def saveAssemblyToFile(self, paths_assembled, assemblyfile="test.assembly"):
with open(assemblyfile, 'w') as fp:
for i in range(len(paths_assembled)):
fp.write('Seq #{}:\n{}\n\n'.format(i, paths_assembled[i]))
def saveAssemblyToFasta(self, paths_assembled, fastafile="test.fasta"):
with open(fastafile, 'w') as fp:
for i in range(len(paths_assembled)):
if len(paths_assembled[i]) < 500:
continue
fp.write('>{}\n{}\n'.format(hash(paths_assembled[i]), paths_assembled[i]))
if __name__ == '__main__':
if len(sys.argv) <= 1:
print('Usage: {} <seq_file>'.format(sys.argv[0]))
exit(0)
seq_file = sys.argv[1]
seqs = readFile(seq_file)
dgraph = DeBruijngraph(seqs, k=30)
dgraph.buildGraph()
# dgraph.saveGraphToFile(nodefile=seq_file+'.nodes', edgefile=seq_file+'.edges')
dgraph.saveGraphToFile()
dgraph.genAssembly()
| 31.706612
| 138
| 0.554412
|
aa3a43ae08e0d51a9a19ab3d326e8d098786259d
| 4,894
|
py
|
Python
|
examples/example_07_full_reconstruction_a_grid_scan_step_03_downsample.py
|
gbzan/algotom
|
314f05b6a226e666a8ae4417b151d896606e7db4
|
[
"Apache-2.0"
] | null | null | null |
examples/example_07_full_reconstruction_a_grid_scan_step_03_downsample.py
|
gbzan/algotom
|
314f05b6a226e666a8ae4417b151d896606e7db4
|
[
"Apache-2.0"
] | null | null | null |
examples/example_07_full_reconstruction_a_grid_scan_step_03_downsample.py
|
gbzan/algotom
|
314f05b6a226e666a8ae4417b151d896606e7db4
|
[
"Apache-2.0"
] | null | null | null |
# ===========================================================================
# ===========================================================================
# Copyright (c) 2021 Nghia T. Vo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
# Author: Nghia T. Vo
# E-mail:
# Description: Examples of how to downsample a full reconstruction of a grid
# scan.
# ===========================================================================
"""
The following examples show how to downsample a full reconstruction of a grid
scan.
Run "example_07_*_step_02.py" before trying this script.
The reconstruction data from "example_07_*_step_02.py" are split across 12 hdf
files of ~944 GB each. The whole volume is downsampled by a factor of 8 along
each axis without an intermediate step of combining the 12 files into one huge
file (11.3 TB in total).
"""
import timeit
import numpy as np
import algotom.io.loadersaver as losa
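# The script downsamples by reshaping each chunk so that every (8, 8, 8) cube of
# voxels gets its own axes and then averaging over those axes. A minimal sketch of
# that reshape + mean idea on a small hypothetical array (never called here):
def _block_mean_sketch():
    vol = np.arange(4 * 4 * 4, dtype=np.float32).reshape(4, 4, 4)
    half = vol.reshape(2, 2, 2, 2, 2, 2).mean(axis=(1, 3, 5))
    return half  # shape (2, 2, 2); each value is the mean of one 2x2x2 block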
input_base = "D:/Full_reconstruction/"
# Where to save the outputs
output_base = "D:/Dsp_grid_scan/"
output_file = "full_size_dsp_8_8_8.hdf"
cube = (8, 8, 8) # Downsampling factor
list_file = losa.find_file(input_base + "*.hdf")
key_path = "entry/data"
list_hdf_object = []
num_file = len(list_file)
list_nslice = []
for i in range(num_file):
hdf_object = losa.load_hdf(list_file[i], key_path)
list_hdf_object.append(hdf_object)
(nslice, height, width) = hdf_object.shape
list_nslice.append(nslice)
total_slice = np.sum(np.asarray(list_nslice))
total_slice_r = (total_slice // cube[0]) * cube[0]
height_r = (height // cube[1]) * cube[1]
width_r = (width // cube[2]) * cube[2]
# Calculate the size of downsampled data.
dsp_slice = total_slice_r // cube[0]
dsp_height = height_r // cube[1]
dsp_width = width_r // cube[2]
next_slice = 0
list_slices = []
for i in range(num_file):
list1 = next_slice + np.arange(list_nslice[i])
list_slices.append(list1)
next_slice = list1[-1] + 1
# Locate slices in hdf_files given a range of requested slices
def locate_slice_chunk(slice_start, slice_stop, list_slices):
"""
Map requested slices to slice-indices in each hdf file.
Return: [[file_index0, slice_start0, slice_stop0]]
or [[file_index0, slice_start0, slice_stop0], [file_index1, slice_start1, slice_stop1]]
"""
results = []
for i, list1 in enumerate(list_slices):
result_tmp = []
for slice_idx in range(slice_start, slice_stop):
pos = np.squeeze(np.where(list1 == slice_idx)[0])
if pos.size == 1:
result_tmp.append(pos)
if len(result_tmp) > 0:
result_tmp = np.asarray(result_tmp)
results.append([i, result_tmp[0], result_tmp[-1]])
return results
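# A quick illustration with hypothetical sizes: if list_slices holds two files of
# 100 slices each, locate_slice_chunk(95, 105, list_slices) returns
# [[0, 95, 99], [1, 0, 4]], i.e. slices 95-99 of file 0 plus local slices 0-4 of file 1.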
print("!!! Start !!!")
time_start = timeit.default_timer()
# Open hdf_stream for saving data.
hdf_stream = losa.open_hdf_stream(output_base + "/" + output_file,
(dsp_slice, dsp_height, dsp_width), key_path)
list_idx_nslice = np.reshape(np.arange(total_slice_r), (dsp_slice, cube[0]))
dsp_method = np.mean # Use mean for downsampling
for idx in np.arange(dsp_slice):
slice_start = list_idx_nslice[idx, 0]
slice_stop = list_idx_nslice[idx, -1] + 1
slices = locate_slice_chunk(slice_start, slice_stop, list_slices)
if len(slices) == 1:
data_chunk = list_hdf_object[slices[0][0]][slices[0][1]:slices[0][2] + 1, :height_r, :width_r]
else:
data_chunk1 = list_hdf_object[slices[0][0]][slices[0][1]:slices[0][2] + 1, :height_r, :width_r]
data_chunk2 = list_hdf_object[slices[1][0]][slices[1][1]:slices[1][2] + 1, :height_r, :width_r]
data_chunk = np.concatenate((data_chunk1, data_chunk2), axis=0)
mat_dsp = data_chunk.reshape(1, cube[0], dsp_height, cube[1], dsp_width, cube[2])
mat_dsp = dsp_method(dsp_method(dsp_method(mat_dsp, axis=-1), axis=1), axis=2)
hdf_stream[idx] = mat_dsp
if idx % 200 == 0:
out_name = "0000" + str(idx)
losa.save_image(output_base +"/some_tif_files/dsp_" + out_name[-5:] + ".tif", mat_dsp[0])
time_now = timeit.default_timer()
print("Done slices up to {0}. Time cost {1}".format(slice_stop, time_now - time_start))
time_stop = timeit.default_timer()
print("All done!!! Total time cost: {}".format(time_stop - time_start))
| 39.788618
| 103
| 0.650184
|
058985110485fa21e089c569021e3e8de0a75541
| 7,412
|
py
|
Python
|
idiaptts/src/data_preparation/OpenSMILELabelGen.py
|
idiap/IdiapTTS
|
60413d6847508e269d44aa41885e668db7dfd440
|
[
"MIT"
] | 16
|
2019-05-27T09:20:40.000Z
|
2022-01-27T23:24:26.000Z
|
idiaptts/src/data_preparation/OpenSMILELabelGen.py
|
idiap/IdiapTTS
|
60413d6847508e269d44aa41885e668db7dfd440
|
[
"MIT"
] | 1
|
2019-07-02T09:53:14.000Z
|
2019-08-06T14:37:56.000Z
|
idiaptts/src/data_preparation/OpenSMILELabelGen.py
|
idiap/IdiapTTS
|
60413d6847508e269d44aa41885e668db7dfd440
|
[
"MIT"
] | 5
|
2019-06-01T18:32:12.000Z
|
2021-08-03T23:14:56.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by Bastian Schnell <bastian.schnell@idiap.ch>
#
# System imports.
import arff
import argparse
import glob
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from typing import List, Tuple
# Third-party imports.
import numpy as np
# Local source tree imports.
from idiaptts.src.data_preparation.LabelGen import LabelGen
from idiaptts.misc.normalisation.MeanStdDevExtractor import MeanStdDevExtractor
class OpenSMILELabelGen(LabelGen):
"""Create OpenSMILE features from wav files."""
@staticmethod
def gen_data(dir_in: os.PathLike,
opensmile_config_file: os.PathLike,
feature_name: str,
num_frames: int,
dir_out: os.PathLike = None,
file_id_list: os.PathLike = None,
id_list: List[str] = None,
file_ext: str = "wav",
return_dict: bool = False) -> Tuple:
if file_id_list is None:
file_id_list_name = ""
else:
id_list, file_id_list_name = OpenSMILELabelGen._get_id_list(
dir_in, file_id_list, id_list, file_ext)
if file_id_list_name is not None and file_id_list_name != "":
file_id_list_name += "-"
if return_dict:
label_dict = {}
normaliser = MeanStdDevExtractor()
for file_name in id_list:
features = OpenSMILELabelGen.extract_features(
config_file=opensmile_config_file,
file_path=os.path.join(dir_in, file_name + "." + file_ext),
num_frames=num_frames
)
if return_dict:
label_dict[file_name] = features
normaliser.add_sample(features)
if dir_out is not None:
out_file_path = os.path.join(dir_out, file_name)
OpenSMILELabelGen._save_to_npz(
file_path=out_file_path,
features=features.astype(np.float32),
feature_name=feature_name)
if dir_out is not None:
norm_file_path = os.path.join(dir_out,
file_id_list_name + feature_name)
logging.info("Write norm_prams to {}".format(norm_file_path))
normaliser.save(norm_file_path)
mean, std_dev = normaliser.get_params()
if return_dict:
return label_dict, mean, std_dev
else:
return mean, std_dev
@staticmethod
def _get_id_list(dir_in: os.PathLike, file_id_list: os.PathLike,
id_list: List[str] = None, file_ext: str = ".wav"
) -> Tuple[List[str], str]:
"""
Fill file_id_list by files in dir_in with file_ext if not given and set
an appropriate file_id_list_name.
"""
if id_list is None:
id_list = list()
filenames = glob.glob(os.path.join(dir_in, "*" + file_ext))
for filename in filenames:
id_list.append(os.path.splitext(os.path.basename(filename))[0])
file_id_list_name = "all"
else:
file_id_list_name = os.path.splitext(os.path.basename(file_id_list))[0]
return id_list, file_id_list_name
@staticmethod
def extract_features(config_file: os.PathLike, file_path: os.PathLike,
num_frames: int = None) -> np.ndarray:
"""
Extract features with SMILEExtract.
Removes first and last generated feature.
"""
tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "test_output.arff")
try:
cmd = ["opensmile/bin/SMILExtract", "-C", config_file,
"-I", file_path, "-O", path, "-l", "1"]
logging.debug(cmd)
subprocess.check_output(cmd)
dataset = arff.load(open(path, 'r'))
data = dataset['data']
except subprocess.CalledProcessError as e:
print("SMILExtract stdout output:\n", e.output)
raise
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
if num_frames is None or num_frames == -1:
features = data
else:
len_diff = len(data) - num_frames
if len_diff > 0:
features = data[:num_frames]
else:
num_features = len(data[0])
padding = abs(len_diff) * [num_features * [0]]
features = data + padding
features = np.atleast_2d(np.asarray(features))[:, 1:-1].astype(float)
return features
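# A minimal usage sketch (the config and wav paths are placeholders):
#
#     feats = OpenSMILELabelGen.extract_features(
#         config_file="my_config.conf", file_path="sample.wav", num_frames=100)
#     # feats is a float ndarray of shape (100, number_of_feature_columns)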
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-a", "--dir_audio",
help="Directory containing the audio (wav) files.",
type=str, dest="dir_audio", required=True)
parser.add_argument('-c', '--config_file', default=None,
help='Path to the openSMILE config to use.',
required=True)
parser.add_argument("-f", "--num_frames", default=-1,
help="The features are cropped/padded to this length.",
type=int, dest="num_frames", required=False)
parser.add_argument("-i", "--file_id_list", default=None,
help="Path to text file with ids to process.",
type=str, dest="file_id_list", required=False)
parser.add_argument("--id_name", default=None,
help="Single id_name to process",
type=str, dest="id_name", required=False)
parser.add_argument("-n", "--feature_name",
help="Name of the feature used to store in npz file.",
type=str, required=True)
parser.add_argument("-o", "--dir_out",
help="Output directory to store the labels.",
type=str, dest="dir_out", required=True)
# Parse arguments
args = parser.parse_args()
dir_audio = os.path.abspath(args.dir_audio)
opensmile_config_file = os.path.abspath(args.config_file)
num_frames = int(args.num_frames)
feature_name = args.feature_name
dir_out = os.path.abspath(args.dir_out)
if args.file_id_list is not None:
file_id_list = os.path.abspath(args.file_id_list)
with open(file_id_list) as f:
id_list = f.readlines()
id_list[:] = [s.strip(' \t\n\r') for s in id_list] # Trim entries in-place.
elif args.id_name is not None:
file_id_list = None
id_list = [args.id_name]
else:
raise RuntimeError("Either file_id_list or id_name has to be given.")
assert num_frames == -1 or num_frames > 0, "num_frames has to be positive or -1."
OpenSMILELabelGen.gen_data(
dir_in=dir_audio,
dir_out=dir_out,
file_id_list=file_id_list,
id_list=id_list,
opensmile_config_file=opensmile_config_file,
feature_name=feature_name,
num_frames=num_frames,
return_dict=False
)
sys.exit(0)
if __name__ == "__main__":
main()
| 35.127962
| 85
| 0.585942
|
f3abc22a8775630e6b48620e42a4073acd5aca7d
| 22
|
py
|
Python
|
Modules/vms/stsdef/stsdef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | 2
|
2021-10-06T15:46:53.000Z
|
2022-01-26T02:58:54.000Z
|
Modules/vms/stsdef/stsdef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | null | null | null |
Modules/vms/stsdef/stsdef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | null | null | null |
from _stsdef import *
| 11
| 21
| 0.772727
|
557399b3fa3a760af478f4c2d0c9c7fa84058aa2
| 1,636
|
py
|
Python
|
HiLightFinder.py
|
zeibou/pyHiLightExtractor
|
2a47538405c826a4ee960d9dbcad34e418190aa0
|
[
"MIT"
] | 2
|
2019-01-18T13:55:05.000Z
|
2019-03-14T02:44:42.000Z
|
HiLightFinder.py
|
zeibou/pyHiLightExtractor
|
2a47538405c826a4ee960d9dbcad34e418190aa0
|
[
"MIT"
] | null | null | null |
HiLightFinder.py
|
zeibou/pyHiLightExtractor
|
2a47538405c826a4ee960d9dbcad34e418190aa0
|
[
"MIT"
] | 2
|
2020-06-24T22:37:07.000Z
|
2020-09-02T06:10:24.000Z
|
from dataclasses import dataclass
from datetime import timedelta
import struct
@dataclass
class Atom:
    name: bytes
size: int
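# An MP4 file is a sequence of boxes (atoms): a 4-byte big-endian size that
# includes the 8-byte header, then a 4-byte type name, then the payload.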
def yield_box(stream):
    while True:
        size = stream.read(4)
        if len(size) < 4:
            break
        n = int(struct.unpack('>I', size)[0])
        name = stream.read(4)
        yield Atom(name, n - 8)
def move_stream_to(stream, n):
chunks = 64 * (1 << 20)
while n > chunks:
stream.seek(chunks, 1)
n -= chunks
stream.seek(n, 1)
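# GoPro HiLight tags sit in the 'HMMT' box nested under 'moov'/'udta': a 4-byte
# big-endian count followed by that many 4-byte timestamps in milliseconds.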
def find_hilights(filename):
with open(filename, 'rb') as f:
for atom in yield_box(f):
if atom.name == b'moov':
for atom in yield_box(f):
if atom.name == b'udta':
for atom in yield_box(f):
if atom.name == b'HMMT':
nb_hilights = int.from_bytes(f.read(4), byteorder='big')
if nb_hilights:
return struct.unpack('>' + 'i' * nb_hilights, f.read(4 * nb_hilights))
else:
return ()
else:
move_stream_to(f, atom.size)
else:
move_stream_to(f, atom.size)
else:
move_stream_to(f, atom.size)
return ()
def print_time(time_ms):
t = timedelta(milliseconds=time_ms)
print(t)
if __name__ == '__main__':
file = "/Users/nicolas.seibert/Documents/foot/2019-01-07/GX030059.MP4"
for t in find_hilights(file):
print_time(t)
| 30.296296
| 106
| 0.487775
|
85b3c2b5432d9e9d35ab33bb518e5462d8ae0675
| 3,100
|
py
|
Python
|
categorical_embedder/embedders/vae/DiscriminativeTrainer.py
|
erelcan/categorical-embedder
|
376b8779500af2aa459c879f8e525f2ef25d6b31
|
[
"Apache-2.0"
] | 3
|
2020-12-19T10:52:58.000Z
|
2021-06-08T09:06:44.000Z
|
categorical_embedder/embedders/vae/DiscriminativeTrainer.py
|
erelcan/categorical-embedder
|
376b8779500af2aa459c879f8e525f2ef25d6b31
|
[
"Apache-2.0"
] | null | null | null |
categorical_embedder/embedders/vae/DiscriminativeTrainer.py
|
erelcan/categorical-embedder
|
376b8779500af2aa459c879f8e525f2ef25d6b31
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model
from categorical_embedder.embedders.core.vae.VAETrainerABC import VAETrainerABC
from categorical_embedder.embedders.core.vae.concrete.VAE import VAE
from categorical_embedder.processors.DiscriminativeWrapper import DiscriminativeWrapper
from categorical_embedder.processors.HolisticCategoricalProcessor import HolisticCategoricalProcessor
from categorical_embedder.processors.SelfReturner import SelfReturner
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function
class DiscriminativeTrainer(VAETrainerABC):
def __init__(self, num_of_categories, uniques, outer_generator, generator_info, model_info, save_info, discriminative_info):
super().__init__(DiscriminativeWrapper(HolisticCategoricalProcessor(uniques, 0, np.int, shifter_on=False, mirror_target=True), SelfReturner()), outer_generator, generator_info, model_info, save_info)
self._discriminative_info = discriminative_info
self._num_of_categories = num_of_categories
self._vocab_size = self._preprocessor.get_feature_processor().get_vocab_size()
def _get_model(self):
encoder_input = Input((self._num_of_categories, self._vocab_size), dtype="int64")
enc_dec = VAE(self._num_of_categories, self._vocab_size, self._model_info["hidden_length"],
self._model_info["encoder_latent_info"], self._model_info["encoder_layer_info"],
self._model_info["decoder_layer_info"], self._model_info["inner_loss_info"], name="main")
decoder_output, encoded = enc_dec(encoder_input)
# dense_output = Dense(10, "relu")(encoded)
# discriminative_output = Dense(self._discriminative_info["target_dim_length"], activation=self._discriminative_info["activation"], name="discriminative")(dense_output)
discriminative_output = Dense(self._discriminative_info["target_dim_length"], activation=self._discriminative_info["activation"], name="discriminative")(encoded)
model = Model(inputs=encoder_input, outputs=[decoder_output, discriminative_output])
self._main_model_artifacts["custom_objects_info"]["layer_info"] = ["VAE", "VAEEncoder", "VAEDecoder"]
self._embedder_artifacts["custom_objects_info"]["layer_info"] = ["VAEEncoder"]
return model
def _extract_embedder_model(self, model):
encoder_input = model.layers[0].output
embedding = model.get_layer("main").get_encoder()(encoder_input)
embedder_model = Model(inputs=encoder_input, outputs=embedding)
# Maybe misleading...
# Consider not providing any info in compile~
if self._model_info["has_implicit_loss"]:
embedder_model.compile(optimizer=self._model_info["optimizer"], metrics=self._model_info["metrics"]["discriminative"])
else:
embedder_model.compile(optimizer=self._model_info["optimizer"], loss=get_loss_function(self._model_info["loss_info"]), metrics=self._model_info["metrics"]["discriminative"])
return embedder_model
| 58.490566
| 207
| 0.762903
|
cf7a3c32ac5e8fc6ed68217d5d2178340aa1bde1
| 88,809
|
py
|
Python
|
venv/Lib/site-packages/django/db/models/fields/__init__.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | 26
|
2015-01-20T08:02:38.000Z
|
2020-06-10T04:57:41.000Z
|
venv/Lib/site-packages/django/db/models/fields/__init__.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | 12
|
2019-12-26T16:40:09.000Z
|
2022-03-11T23:17:44.000Z
|
venv/Lib/site-packages/django/db/models/fields/__init__.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | 13
|
2016-02-28T00:14:23.000Z
|
2021-05-03T15:47:36.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.query_utils import QueryWrapper, RegisterLookupMixin
from django.utils import six, timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import (
RemovedInDjango20Warning, warn_about_renamed_method,
)
from django.utils.duration import duration_string
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import Promise, cached_property, curry
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty',
'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.Iterator):
choices = list(choices)
self.choices = choices or []
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
@property
def rel(self):
warnings.warn(
"Usage of field.rel has been deprecated. Use field.remote_field instead.",
RemovedInDjango20Warning, 2)
return self.remote_field
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model=self.model):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
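    # For illustration (hypothetical field): CharField(max_length=100).deconstruct()
    # returns roughly (name, "django.db.models.CharField", [], {"max_length": 100}),
    # which is why clone() below can simply call self.__class__(*args, **kwargs).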
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
something else than None, then the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
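        #
        # A minimal sketch of that escape hatch (hypothetical custom field):
        #
        #     class HandField(Field):
        #         def db_type(self, connection):
        #             return 'char(104)'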
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
type_string = self.db_type(connection)
try:
check_string = connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_field(self, virtual=True)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_filter_kwargs_for_object(self, obj):
"""
Return a dict that when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, '_prepare'):
return value._prepare(self)
if lookup_type in {
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'isnull', 'search', 'regex', 'iregex',
}:
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
return self.get_prep_value(value)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('search', 'regex', 'iregex', 'contains',
'icontains', 'iexact', 'startswith', 'endswith',
'istartswith', 'iendswith'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type == 'isnull':
return []
else:
return [value]
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.remote_field, 'get_related_field'):
lst = [(getattr(x, self.remote_field.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
@warn_about_renamed_method(
'Field', '_get_val_from_obj', 'value_from_object',
RemovedInDjango20Warning
)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
hint=None,
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 than it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
hint=None,
obj=self,
id='fields.E120',
)
]
elif not isinstance(self.max_length, six.integer_types) or self.max_length <= 0:
return [
checks.Error(
"'max_length' must be a positive integer.",
hint=None,
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
def check(self, **kwargs):
errors = super(DateTimeCheckMixin, self).check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now,
self.has_default()]
enabled_options = [option not in (None, False)
for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
hint=None,
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super(DateField, self).contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
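# Illustrative addition, not part of the original module: a hypothetical model showing
# how the checks above play out in practice. This sketch assumes it lives in an
# installed app's models.py of a configured Django project.
from django.db import models
from django.utils import timezone

class Article(models.Model):
    created = models.DateField(auto_now_add=True)        # fine: set once, on first save
    published = models.DateField(default=timezone.now)   # fine: callable default, no fields.W161
    # models.DateField(auto_now=True, default=timezone.now)   would raise fields.E160 (mutually exclusive options)
    # models.DateField(default=timezone.now())                would warn fields.W161 (fixed value, evaluated once at start-up)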
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
hint=None,
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
hint=None,
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
hint=None,
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
hint=None,
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
hint=None,
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super(DecimalField, self).validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types):
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
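# Illustrative addition, not part of the original module: DecimalField normalizes input
# through the decimal module, and the checks above require a positive max_digits, a
# non-negative decimal_places, and decimal_places <= max_digits (fields.E130-E134).
import decimal

_price = DecimalField(max_digits=6, decimal_places=2)
assert _price.to_python('19.99') == decimal.Decimal('19.99')
# DecimalField(max_digits=2, decimal_places=4) would be flagged as fields.E134 by system checks.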
class DurationField(Field):
"""Stores timedelta objects.
    Uses interval on postgres, INTERVAL DAY TO SECOND on Oracle, and bigint of
microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
# Discard any fractional microseconds due to floating point arithmetic.
return int(round(value.total_seconds() * 1000000))
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super(DurationField, self).get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.DurationField,
}
defaults.update(kwargs)
return super(DurationField, self).formfield(**defaults)
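# Illustrative addition, not part of the original module: on backends without a native
# duration type, get_db_prep_value() above stores the timedelta as a bigint of whole
# microseconds, as in this standalone sketch.
import datetime

_delta = datetime.timedelta(days=1, seconds=2, microseconds=3)
assert int(round(_delta.total_seconds() * 1000000)) == 86402000003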
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
# We do not exclude max_length if it matches default as we want to change
# the default in future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
hint=None,
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
errors = super(IntegerField, self).check(**kwargs)
errors.extend(self._check_max_length_warning())
return errors
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
range_validators = []
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
range_validators.append(validators.MinValueValidator(min_value))
if max_value is not None:
range_validators.append(validators.MaxValueValidator(max_value))
return super(IntegerField, self).validators + range_validators
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
('GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.'),
hint=None,
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, six.string_types):
value = force_text(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
time or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self.value_from_object(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except AttributeError:
raise TypeError(self.error_messages['invalid'] % {'value': value})
if connection.features.has_native_uuid_field:
return value
return value.hex
def to_python(self, value):
if value and not isinstance(value, uuid.UUID):
try:
return uuid.UUID(value)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
defaults = {
'form_class': forms.UUIDField,
}
defaults.update(kwargs)
return super(UUIDField, self).formfield(**defaults)
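# Illustrative addition, not part of the original module: a hypothetical model pulling a
# few of the field types defined above together. Assumes an installed app in a normal
# Django project.
import uuid
from django.db import models

class Ticket(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    email = models.EmailField()                        # max_length defaults to 254 (RFC 3696/5321)
    slug = models.SlugField()                          # max_length 50, db_index=True by default
    attempts = models.PositiveSmallIntegerField(default=0)
    opened = models.DateTimeField(auto_now_add=True)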
avg_line_length: 36.743484 | max_line_length: 101 | alphanum_fraction: 0.590661   (record above)

hexsha: 0a26fef5f46a84fc2708656cb1c71421edea14da | size: 1,735 | ext: py | lang: Python
max_stars:  repo=hec44/DCGCN, path=preprocess/amr_preprocess/global_node.py, head=a6e9c610d847295829a67337536769d678419ec2, licenses=["MIT"], count=75, events=2019-03-12T04:29:40.000Z to 2022-03-19T14:04:04.000Z
max_issues: repo=hec44/DCGCN, path=preprocess/amr_preprocess/global_node.py, head=a6e9c610d847295829a67337536769d678419ec2, licenses=["MIT"], count=3, events=2020-11-06T02:07:03.000Z to 2022-03-11T21:35:57.000Z
max_forks:  repo=hec44/DCGCN, path=preprocess/amr_preprocess/global_node.py, head=a6e9c610d847295829a67337536769d678419ec2, licenses=["MIT"], count=8, events=2019-08-22T07:35:30.000Z to 2022-03-01T03:53:11.000Z
content:
import argparse
def add_global_node(dirt, name):
f1 = open(dirt + name + ".amr", "r")
f2 = open(dirt + name + ".grh", "r")
h1 = open(dirt + name + ".amr_g", "w")
h2 = open(dirt + name + ".grh_g", "w")
index_list = []
node = "gnode"
for line in f1:
toks = line.strip().split()
for t in toks:
h1.write(str(t) + ' ')
h1.write(str(node) + '\n')
index_list.append(len(toks))
i = 0
for line in f2:
deps = line.strip().split()
for d in deps:
h2.write(str(d)+ ' ')
index = index_list[i]
for j in range(index):
d1 = '(' + str(index) + ',' + str(j) + ',g)'
h2.write(d1+' ')
d3 = '(' + str(index) + ',' + str(index) + ',s)'
h2.write(d3+'\n')
        i += 1
    # Close the handles explicitly so the *_g outputs are flushed to disk before
    # gen_amrgrh() reads them back in.
    for handle in (f1, f2, h1, h2):
        handle.close()
def gen_amrgrh(dirt, name):
file_4 = dirt + name + ".amr_g"
file_5 = dirt + name + ".grh_g"
file_6 = dirt + name + ".amrgrh_g"
h4 = open(file_4, "r")
h5 = open(file_5, "r")
h6 = open(file_6, "w")
toks = []
deps = []
for line in h4:
seg = line.strip().split()
toks.append(seg)
for line in h5:
seg = line.strip().split()
deps.append(seg)
L = len(toks)
if len(toks) != len(deps):
print("error3")
for i in range(L):
tok = toks[i]
dep = deps[i]
for j in range(len(tok)):
h6.write(str(tok[j]) + ' ')
h6.write("\t")
for j in range(len(dep)):
h6.write(str(dep[j]) + ' ')
h6.write("\n")
if __name__ == '__main__':
# Parse input
parser = argparse.ArgumentParser(description="process AMR with the global node")
parser.add_argument('--input_dir', type=str, help='input dir')
# name_list = ["train", "test", "val"]
# dirt = "en2cs/"
args = parser.parse_args()
name_list = ["train", "test", "dev"]
for name in name_list:
add_global_node(args.input_dir, name)
gen_amrgrh(args.input_dir, name)
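# Illustrative addition (example values are made up, not taken from any real data file):
# what the two passes above produce for a 3-token line.
#   .amr : want-01 boy go-02        ->  .amr_g : want-01 boy go-02 gnode
#   .grh : (0,1,x) (0,2,x)          ->  .grh_g : (0,1,x) (0,2,x) (3,0,g) (3,1,g) (3,2,g) (3,3,s)
# A global node is appended after the last token (its index equals the original token
# count), it gets a 'g' edge to every original token plus an 's' self-loop, and
# gen_amrgrh() then joins each .amr_g line with its .grh_g line, tab-separated, into
# the .amrgrh_g file.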
avg_line_length: 19.942529 | max_line_length: 81 | alphanum_fraction: 0.576945   (record above)

hexsha: 40fe52900060e08204e737e890780fd711786c54 | size: 438 | ext: py | lang: Python
max_stars:  repo=ianthomas23/mapshader, path=mapshader/tests/test_transforms.py, head=3a29f9f2ad355df193896a598825cef12c1e0dae, licenses=["MIT"], count=null, events=null
max_issues: repo=ianthomas23/mapshader, path=mapshader/tests/test_transforms.py, head=3a29f9f2ad355df193896a598825cef12c1e0dae, licenses=["MIT"], count=null, events=null
max_forks:  repo=ianthomas23/mapshader, path=mapshader/tests/test_transforms.py, head=3a29f9f2ad355df193896a598825cef12c1e0dae, licenses=["MIT"], count=null, events=null
content:
# import json
# from os import path
# from io import BytesIO
# import pytest
# import xarray as xr
# from datashader.transfer_functions import Image
# from mapshader.sources import MapSource
# from mapshader.core import render_map
# from mapshader.core import render_geojson
# from mapshader.sources import elevation_source
# from mapshader.tests.data import DEFAULT_SOURCES_FUNCS
# TODO: add transform tests (test_transforms.py)
avg_line_length: 20.857143 | max_line_length: 56 | alphanum_fraction: 0.799087   (record above)

hexsha: 639d5ea6e06a700057063bc15001f10824002cb5 | size: 7,130 | ext: py | lang: Python
max_stars:  repo=hash2430/pitchtron, path=preprocess.py, head=b6dbb88774f340b46fd7630bd239f774b139c05c, licenses=["BSD-3-Clause"], count=123, events=2020-05-12T08:41:09.000Z to 2022-02-01T14:05:55.000Z
max_issues: repo=ts0923/pitchtron, path=preprocess.py, head=b6dbb88774f340b46fd7630bd239f774b139c05c, licenses=["BSD-3-Clause"], count=12, events=2020-06-24T23:40:38.000Z to 2022-02-10T01:41:09.000Z
max_forks:  repo=ts0923/pitchtron, path=preprocess.py, head=b6dbb88774f340b46fd7630bd239f774b139c05c, licenses=["BSD-3-Clause"], count=24, events=2020-05-16T23:21:14.000Z to 2021-07-14T09:45:14.000Z
content:
import argparse
import os
from tqdm import tqdm
from datasets import libri_tts, selvas_multi_lbl,selvas_multispeaker_pron, public_korean_pron, check_file_integrity, generate_mel_f0, f0_mean
from configs.korean_200113 import create_hparams
hparams = create_hparams()
# WARN: Do not use this without adding trim
# def preprocess_libri_tts(args):
# libri_tts.build_from_path(args.num_workers, tqdm=tqdm)
# WARN: Do not use this without adding trim and supporting lbl phoneme sets
# def preprocess_selvas_multi(args):
# in_dir = '/past_projects/DB/selvasai/selvasai_organized'
# out_dir = 'filelists'
# selvas_multi_lbl.build_from_path(in_dir, out_dir, args.num_workers, tqdm=tqdm)
def preprocess_selvas_multispeaker_pron(args):
# in_dir = '/past_projects/DB/selvasai/selvasai_organized'
in_dir = '/mnt/sdd1/leftout_males'
# in_dir = '/mnt/sdd1/selvas_emotion'
out_dir = 'filelists'
# in order of train-valid-text
filelists_name = [
'train_file_list_pron_sub.txt',
'valid_file_list_pron_sub.txt',
'test_file_list_pron_sub.txt'
]
selvas_multispeaker_pron.build_from_path(in_dir, out_dir, filelists_name, 4, args.num_workers, tqdm=tqdm)
# TODO: lang code is written in this procedure. Langcode==1 for korean-only case is hard-coded for now.
# TODO: This must be fixed to support english and other languages as well.
def _integrate(train_file_lists, target_train_file_list):
sources = [[] for i in range(len(train_file_lists))]
i = 0
for file_list in train_file_lists:
with open(file_list, 'r', encoding='utf-8-sig') as f:
sources[i] = f.readlines()
i += 1
# integrate meta file
lang_code = 1
with open(target_train_file_list, 'w', encoding='utf-8-sig') as f:
for i in range(len(sources)):
for j in range(len(sources[i])):
sources[i][j] = sources[i][j].rstrip() + '|{}\n'.format(str(lang_code)) # add language code
for i in range(1, len(sources)):
sources[0] += sources[i]
# shuffle or not
f.writelines(sources[0])
def preprocess_public_korean_pron(args):
# in_dir = '/mnt/sdd1/korean_public'
in_dir = '/mnt/sdd1/leftout_korean_old_male'
out_dir = 'filelists'
filelists_name = [
'train_korean_pron.txt',
'valid_korean_pron.txt',
'test_korean_pron.txt'
]
public_korean_pron.build_from_path(in_dir, out_dir, filelists_name, args.num_workers, tqdm=tqdm)
# This better not be done multithread because meta file is going to be locked and it will be inefficient.
def integrate_dataset(args):
# train_file_lists = [
# 'filelists/libritts_train_clean_100_audiopath_text_sid_shorterthan10s_atleast5min_train_filelist.txt',
# 'filelists/train_file_list.txt']
# eval_file_lists = [
# '/home/administrator/projects/pitchtron/filelists/libritts_train_clean_100_audiopath_text_sid_atleast5min_val_filelist.txt',
# '/home/administrator/projects/pitchtron/filelists/valid_file_list.txt']
# test_file_lists = [
# 'filelists/libritts_train_clean_100_audiopath_text_sid_shorterthan10s_atleast5min_test_filelist.txt',
# 'filelists/test_file_list.txt']
#
# target_train_file_list = 'filelists/libritts_selvas_multi_train.txt'
# target_eval_file_list = 'filelists/libritts_selvas_multi_eval.txt'
# target_test_file_list = 'filelists/libritts_selvas_multi_test.txt'
train_file_lists = ['/home/administrator/projects/pitchtron/filelists/train_file_list_pron.txt',
'/home/administrator/projects/pitchtron/filelists/public_korean_train_file_list_pron.txt'
]
eval_file_lists = ['/home/administrator/projects/pitchtron/filelists/valid_file_list_pron.txt',
'/home/administrator/projects/pitchtron/filelists/public_korean_valid_file_list_pron.txt'
]
test_file_lists = ['/home/administrator/projects/pitchtron/filelists/test_file_list_pron.txt',
'/home/administrator/projects/pitchtron/filelists/public_korean_test_file_list_pron.txt'
]
target_train_file_list = 'filelists/merge_korean_pron_train.txt'
target_eval_file_list = 'filelists/merge_korean_pron_valid.txt'
target_test_file_list = 'filelists/merge_korean_pron_test.txt'
# merge train lists
_integrate(train_file_lists, target_train_file_list)
# merge eval lists
_integrate(eval_file_lists, target_eval_file_list)
# merge test lists
_integrate(test_file_lists, target_test_file_list)
print('Dataset integration has been complete')
# Try opening files on the filelist and write down the files with io error.
def check_for_file_integrity(args):
lists = ['filelists/merge_korean_pron_train.txt', 'filelists/merge_korean_pron_valid.txt', 'filelists/merge_korean_pron_test.txt']
check_file_integrity.check_paths(lists, tqdm=tqdm)
def gen_mel_f0(args):
lists = ['filelists/merge_korean_pron_train.txt', 'filelists/merge_korean_pron_valid.txt', 'filelists/merge_korean_pron_test.txt']
generate_mel_f0.build_from_path(lists, hparams, tqdm=tqdm)
def preprocess_cal_f0_scale_per_training_speaker(args):
# root = '/mnt/sdd1/selvas_emotion'
# root = '/mnt/sdd1/leftout_males'
# root = '/mnt/sdd1/leftout_korean_old_male/wav_22050'
root = '/mnt/sdd1/korean_public/wav_22050'
f0_mean.build_from_path(root, hparams, tqdm=tqdm)
def main():
parser = argparse.ArgumentParser()
# parser.add_argument('--base_dir', default=os.path.expanduser('/past_projects/DB'))
# parser.add_argument('--output', default='sitec')
parser.add_argument('--dataset', required=True,
choices=['blizzard', 'ljspeech', 'sitec', 'sitec_short', 'selvas_multi', 'libri_tts', 'selvas_multispeaker_pron',
'integrate_dataset', 'public_korean_pron', 'check_file_integrity', 'generate_mel_f0', 'cal_f0_scale_per_training_speaker'])
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--num_workers', type=int, default=12)
args = parser.parse_args()
hparams = create_hparams(args.hparams)
if args.dataset == 'libri_tts':
assert(True)
print("Not implemented")
# preprocess_libri_tts(args)
elif args.dataset == 'selvas_multi':
assert(True)
print("Not implemented")
# preprocess_selvas_multi(args)
elif args.dataset == 'integrate_dataset':
integrate_dataset(args)
elif args.dataset == 'selvas_multispeaker_pron':
preprocess_selvas_multispeaker_pron(args)
elif args.dataset == 'public_korean_pron':
preprocess_public_korean_pron(args)
elif args.dataset == 'check_file_integrity':
check_for_file_integrity(args)
elif args.dataset == 'generate_mel_f0':
gen_mel_f0(args)
elif args.dataset == 'cal_f0_scale_per_training_speaker':
preprocess_cal_f0_scale_per_training_speaker(args)
if __name__ == "__main__":
main()
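# Illustrative addition: example invocations matching the argparse choices above
# (paths are hypothetical; several input directories are hard-coded inside the
# functions above and must exist, or be edited, before running).
#   python preprocess.py --dataset public_korean_pron --num_workers 8
#   python preprocess.py --dataset integrate_dataset
#   python preprocess.py --dataset generate_mel_f0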
avg_line_length: 43.742331 | max_line_length: 156 | alphanum_fraction: 0.721739   (record above)

hexsha: 6ffe4ea234612ddbf57a4d9b17bef1a360e2534c | size: 3,347 | ext: py | lang: Python
max_stars:  repo=bondxue/python_package_example, path=mengheng_probability/test.py, head=7cc171d58f9524111f1a40b23c6a8a11ba244c24, licenses=["MIT"], count=null, events=null
max_issues: repo=bondxue/python_package_example, path=mengheng_probability/test.py, head=7cc171d58f9524111f1a40b23c6a8a11ba244c24, licenses=["MIT"], count=null, events=null
max_forks:  repo=bondxue/python_package_example, path=mengheng_probability/test.py, head=7cc171d58f9524111f1a40b23c6a8a11ba244c24, licenses=["MIT"], count=null, events=null
content:
import unittest
from mengheng_probability import Gaussian
from mengheng_probability import Binomial
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
self.gaussian.read_data_file('numbers.txt')
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_readdata(self):
self.assertEqual(self.gaussian.data, \
[1, 3, 99, 100, 120, 32, 330, 23, 76, 44, 31], 'data not read in correctly')
def test_meancalculation(self):
self.assertEqual(self.gaussian.calculate_mean(), \
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.assertEqual(round(self.gaussian.calculate_stdev(), 2), 92.87, 'sample standard deviation incorrect')
self.assertEqual(round(self.gaussian.calculate_stdev(0), 2), 88.55, 'population standard deviation incorrect')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947, \
'pdf function does not give expected result')
self.gaussian.calculate_mean()
self.gaussian.calculate_stdev()
self.assertEqual(round(self.gaussian.pdf(75), 5), 0.00429, \
'pdf function after calculating mean and stdev does not give expected result')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
class TestBinomialClass(unittest.TestCase):
def setUp(self):
self.binomial = Binomial(0.4, 20)
self.binomial.read_data_file('numbers_binomial.txt')
def test_initialization(self):
self.assertEqual(self.binomial.p, 0.4, 'p value incorrect')
self.assertEqual(self.binomial.n, 20, 'n value incorrect')
def test_readdata(self):
self.assertEqual(self.binomial.data, \
[0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0], 'data not read in correctly')
def test_calculatemean(self):
mean = self.binomial.calculate_mean()
self.assertEqual(mean, 8)
def test_calculatestdev(self):
stdev = self.binomial.calculate_stdev()
self.assertEqual(round(stdev, 2), 2.19)
def test_replace_stats_with_data(self):
p, n = self.binomial.replace_stats_with_data()
self.assertEqual(round(p, 3), .615)
self.assertEqual(n, 13)
def test_pdf(self):
self.assertEqual(round(self.binomial.pdf(5), 5), 0.07465)
self.assertEqual(round(self.binomial.pdf(3), 5), 0.01235)
self.binomial.replace_stats_with_data()
self.assertEqual(round(self.binomial.pdf(5), 5), 0.05439)
self.assertEqual(round(self.binomial.pdf(3), 5), 0.00472)
def test_add(self):
binomial_one = Binomial(.4, 20)
binomial_two = Binomial(.4, 60)
binomial_sum = binomial_one + binomial_two
self.assertEqual(binomial_sum.p, .4)
self.assertEqual(binomial_sum.n, 80)
if __name__ == '__main__':
unittest.main()
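# Illustrative addition (assumes the mengheng_probability package from this repo is
# importable): the behaviour exercised by test_add above. Adding two independent
# Gaussians sums the means and combines the standard deviations as sqrt(3**2 + 4**2).
from mengheng_probability import Gaussian

total = Gaussian(25, 3) + Gaussian(30, 4)
print(total.mean, total.stdev)   # expected: 55 and 5, matching TestGaussianClass.test_add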
avg_line_length: 37.188889 | max_line_length: 118 | alphanum_fraction: 0.654317   (record above)

hexsha: 9bf113e8a073032998ed6a918828292cc4916c40 | size: 1,785 | ext: py | lang: Python
max_stars:  repo=kylegalloway/docker-talk, path=python/mysite/polls/views.py, head=099d1d3898707c29f6d2e8c48ab6da02b7265a2c, licenses=["MIT"], count=null, events=null
max_issues: repo=kylegalloway/docker-talk, path=python/mysite/polls/views.py, head=099d1d3898707c29f6d2e8c48ab6da02b7265a2c, licenses=["MIT"], count=3, events=2021-03-19T01:58:37.000Z to 2021-06-09T18:27:19.000Z
max_forks:  repo=kylegalloway/docker-talk, path=python/mysite/polls/views.py, head=099d1d3898707c29f6d2e8c48ab6da02b7265a2c, licenses=["MIT"], count=null, events=null
content:
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
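# Illustrative addition, not part of this file: a tutorial-style polls/urls.py that
# these views expect, including the 'polls:results' route reversed in vote() above.
# from django.urls import path
# from . import views
#
# app_name = 'polls'
# urlpatterns = [
#     path('', views.IndexView.as_view(), name='index'),
#     path('<int:pk>/', views.DetailView.as_view(), name='detail'),
#     path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
#     path('<int:question_id>/vote/', views.vote, name='vote'),
# ]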
avg_line_length: 32.454545 | max_line_length: 94 | alphanum_fraction: 0.689076   (record above)

hexsha: 8ccee1a0087b873cc70a169de85afba684f02f37 | size: 1,123 | ext: py | lang: Python
max_stars:  repo=browniebroke/browniecutter, path=browniebroke/users/tests/test_forms.py, head=b0b0d5b1633e68372c1de5d04569c2e15ee720d7, licenses=["MIT"], count=null, events=null
max_issues: repo=browniebroke/browniecutter, path=browniebroke/users/tests/test_forms.py, head=b0b0d5b1633e68372c1de5d04569c2e15ee720d7, licenses=["MIT"], count=null, events=null
max_forks:  repo=browniebroke/browniecutter, path=browniebroke/users/tests/test_forms.py, head=b0b0d5b1633e68372c1de5d04569c2e15ee720d7, licenses=["MIT"], count=null, events=null
content:
import pytest
from browniebroke.users.forms import UserCreationForm
from browniebroke.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
avg_line_length: 27.390244 | max_line_length: 59 | alphanum_fraction: 0.596616   (record above)

hexsha: e1fc0e50fccf5ce257b4a28c85e5b92c4886df42 | size: 1,022 | ext: py | lang: Python
max_stars:  repo=GMillerA/100daysofcode-with-python-course, path=days/04-06-collections/Day06.py, head=fcbb7c5ed342b344d43b355d20ca35c6fd712b4a, licenses=["MIT"], count=null, events=null
max_issues: repo=GMillerA/100daysofcode-with-python-course, path=days/04-06-collections/Day06.py, head=fcbb7c5ed342b344d43b355d20ca35c6fd712b4a, licenses=["MIT"], count=null, events=null
max_forks:  repo=GMillerA/100daysofcode-with-python-course, path=days/04-06-collections/Day06.py, head=fcbb7c5ed342b344d43b355d20ca35c6fd712b4a, licenses=["MIT"], count=null, events=null
content:
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 8 12:04:50 2018
@author: Galen Miller
"""
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
'julian sequeira', 'sandra bullock', 'keanu reeves',
'julbob pybites', 'bob belderbos', 'julian sequeira',
'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
def dedup_and_title_case_names(names):
"""Should return a list of names, each name appears only once"""
    names = list(set(names))  # use the argument, not the module-level NAMES constant
names = [name.title() for name in names]
return names
def sort_by_surname_desc(names):
"""Returns names list sorted desc by surname"""
names = dedup_and_title_case_names(names)
names.sort(key = lambda s: s.split()[1], reverse=True)
return names
def shortest_first_name(names):
"""Returns the shortest first name (str)"""
names = dedup_and_title_case_names(names)
    return min(names, key=lambda s: len(s.split()[0])).split()[0]  # compare by length, per the docstring
# The original loop indexed the list with a string (NAMES[name].title()), which raises
# a TypeError; print each name in title case instead.
for name in NAMES:
    print(name.title())
avg_line_length: 30.058824 | max_line_length: 69 | alphanum_fraction: 0.638943   (record above)

hexsha: f74445b9cfed15e09d045ec81d565f311e4c5117 | size: 7,347 | ext: py | lang: Python
max_stars:  repo=solazu/FinRL-Library, path=finrl/commands/data_commands.py, head=6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea, licenses=["MIT"], count=1, events=2021-07-18T13:31:55.000Z to 2021-07-18T13:31:55.000Z
max_issues: repo=solazu/FinRL-Library, path=finrl/commands/data_commands.py, head=6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea, licenses=["MIT"], count=null, events=null
max_forks:  repo=solazu/FinRL-Library, path=finrl/commands/data_commands.py, head=6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea, licenses=["MIT"], count=null, events=null
content:
import logging
import sys
import pandas as pd
import yfinance as yf
import os
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List
from finrl.config import TimeRange, setup_utils_configuration
from finrl.data.converter import convert_ohlcv_format, convert_trades_format
from finrl.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from finrl.exceptions import OperationalException
from finrl.exchange import timeframe_to_minutes
from finrl.resolvers import ExchangeResolver
from finrl.state import RunMode
logger = logging.getLogger(__name__)
def start_download_cryptodata(args: Dict[str, Any]) -> None:
"""
Parameters:
ARGS_DOWNLOAD_DATA = {'config': ['config.json'], 'datadir': None,
'user_data_dir': None, 'pairs': None, 'pairs_file': None,
'days': 160, 'timerange': None,
'download_trades': False, 'exchange': 'binance',
'timeframes': ['1d'], 'erase': False,
'dataformat_ohlcv': None, 'dataformat_trades': None}
Returns:
Json files in user_data/data/exchange/*.json
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
if 'pairs' not in config:
raise OperationalException(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
logger.info(f"About to download pairs: {config['pairs']}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# Manual validations of relevant settings
exchange.validate_pairs(config['pairs'])
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
try:
if config.get('download_trades'):
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=config['pairs'], datadir=config['datadir'],
timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_ohlcv'])
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_download_stockdata(args: Dict[str, Any]) -> None:
"""Fetches data from Yahoo API
Parameters
----------
ticker_list, timerange,
Returns
-------
Json of data
"""
args["exchange"] = "yahoo"
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
config["datadir"] = "user_data/data/yahoo"
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.now().strftime("%Y-%m-%d")
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.fromtimestamp(timerange.stopts).strftime("%Y-%m-%d")
try:
data_df = pd.DataFrame()
for tic in config['ticker_list']:
temp_df = yf.download(tic, start=start, end=end)
temp_df.columns = [
"open",
"high",
"low",
"close",
"adjcp",
"volume",
]
temp_df["close"] = temp_df["adjcp"]
temp_df = temp_df.drop(["adjcp"], axis=1)
temp_df.to_json(f'{os.getcwd()}/{config["datadir"]}/{tic}.json')
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
"""
Convert data from one format to another
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
convert_ohlcv_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
else:
convert_trades_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
def start_list_data(args: Dict[str, Any]) -> None:
"""
List available backtest data
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
from tabulate import tabulate
    from finrl.data.history.idatahandler import get_datahandler
dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])
paircombs = dhc.ohlcv_get_available_data(config['datadir'])
if args['pairs']:
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
print(f"Found {len(paircombs)} pair / timeframe combinations.")
groupedpair = defaultdict(list)
for pair, timeframe in sorted(paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]))):
groupedpair[pair].append(timeframe)
if groupedpair:
print(tabulate([(pair, ', '.join(timeframes)) for pair, timeframes in groupedpair.items()],
headers=("Pair", "Timeframe"),
tablefmt='psql', stralign='right'))
| 38.067358
| 99
| 0.625017
|
4e6b9343c903b603efc822fda14b1b3a69551eeb
| 4,534
|
py
|
Python
|
onto/source/graphql.py
|
billyrrr/onto
|
72733d36a2583ae4758f7cf33a5229b79773702b
|
[
"MIT"
] | 1
|
2020-10-04T10:01:45.000Z
|
2020-10-04T10:01:45.000Z
|
onto/source/graphql.py
|
billyrrr/onto
|
72733d36a2583ae4758f7cf33a5229b79773702b
|
[
"MIT"
] | null | null | null |
onto/source/graphql.py
|
billyrrr/onto
|
72733d36a2583ae4758f7cf33a5229b79773702b
|
[
"MIT"
] | null | null | null |
import asyncio
from datetime import datetime
# from graphene import ObjectType, String, Schema, Field
from aiokafka import AIOKafkaConsumer
from stargql.subscription import Subscription
from onto.source.base import Source
"""
[Start] Sample code from aiokafka
ref: https://github.com/aio-libs/aiokafka
"""
loop = asyncio.get_event_loop()
async def _kafka_subscribe(topic_name):
consumer = AIOKafkaConsumer(
topic_name,
bootstrap_servers='localhost:9092',
# group_id="my-group"
)
# Get cluster layout and join group `my-group`
await consumer.start()
# TODO: make first yield when listener has started
try:
# Consume messages
async for msg in consumer:
yield msg
# print("consumed: ", msg.topic, msg.partition, msg.offset,
# msg.key, msg.value, msg.timestamp)
finally:
# Will leave consumer group; perform autocommit if enabled.
await consumer.stop()
# NOTE: _kafka_subscribe is an async generator, so passing it straight to
# loop.run_until_complete() raises a TypeError, and running a consumer at
# import time would block on a broker anyway; see the driver sketch below.
# loop.run_until_complete(_kafka_subscribe(topic_name='my-topic'))
"""
[End] Sample code from aiokafka
Ref: https://github.com/aio-libs/aiokafka
"""
# # Every schema requires a query.
# class Query(ObjectType):
# hello = String()
#
# def resolve_hello(root, info):
# return "Hello, world!"
#
#
# class Subscription(ObjectType):
# time_of_day = String()
#
# async def subscribe_time_of_day(root, info):
# while True:
# yield datetime.now().isoformat()
# await asyncio.sleep(1)
# schema = Schema(query=Query, subscription=Subscription)
async def main(schema):
subscription = 'subscription { timeOfDay }'
result = await schema.subscribe(subscription)
async for item in result:
print(item.data['timeOfDay'])
# asyncio.run(main(schema))
#
# def _as_graphql_root_schema(attributed):
# from graphql import GraphQLSchema
#
# from onto.models.utils import _graphql_object_type_from_attributed_class
# graphql_ot = _graphql_object_type_from_attributed_class(attributed)
#
# async def sub(parent, info, **kwargs):
# pass
#
# schema = GraphQLSchema(
# query=graphql_ot,
# subscription=Subscription
# )
#
# from graphql.subscription import create_source_event_stream
# create_source_event_stream(schema=schema, document=schema.ast_node, )
# _ = subscribe(schema=schema, subscribe_field_resolver=sub)
#
# return schema
class GraphQLSource(Source):
def __init__(self, attributed_cls):
super().__init__()
self.attributed_cls = attributed_cls
def start(self):
        # NOTE: _as_graphql_root_schema is only defined inside the commented-out
        # block above, so calling start() as written raises a NameError.
        schema = _as_graphql_root_schema(self.attributed_cls)
from stargql import GraphQL
async def on_startup():
from asyncio.queues import Queue
global q
q = Queue()
async def shutdown():
pass
app = GraphQL(
schema=schema,
on_startup=[on_startup],
on_shutdown=[shutdown]
)
return app
#
# class KafkaSource(Source):
#
# # def __init__(self, query):
# # """ Initializes a ViewMediator to declare protocols that
# # are called when the results of a query change. Note that
# # mediator.start must be called later.
# #
# # :param query: a listener will be attached to this query
# # """
# # super().__init__()
# # self.query = query
#
# def start(self):
# self._register()
#
# def _register(self):
# res = _kafka_subscribe(self.topic_name, self._call)
# asyncio.run(res)
# async for item in res:
#
# @classmethod
# def delta(cls, container):
# start = container._read_times[-2]
# end = container._read_times[-1]
# for key in container.d.keys():
# for snapshot in container.get_with_range(key, start, end):
# from onto.database import Snapshot
# prev: Snapshot = snapshot.prev
# cur: Snapshot = snapshot
# if not prev.exists:
# yield ("on_create", key, cur)
# elif prev.exists and cur.exists:
# yield ("on_update", key, cur)
# elif prev.exists and not cur.exists:
# yield ("on_delete", key, cur)
# else:
# raise ValueError
#
# def _call(self, msg):
# self._invoke_mediator(
# func_name=func_name, ref=ref, snapshot=snapshot)
| 29.251613
| 78
| 0.61491
|
979cbaf96db5ef94ba020dbe56fa54cd265c1c27
| 1,386
|
py
|
Python
|
passward_test.py
|
Bernard2030/passward-locker
|
4d654076197d58a78a9c782097c88b79c72a9653
|
[
"Unlicense"
] | null | null | null |
passward_test.py
|
Bernard2030/passward-locker
|
4d654076197d58a78a9c782097c88b79c72a9653
|
[
"Unlicense"
] | null | null | null |
passward_test.py
|
Bernard2030/passward-locker
|
4d654076197d58a78a9c782097c88b79c72a9653
|
[
"Unlicense"
] | null | null | null |
import unittest
from passward import User
class TestOne(unittest.TestCase):
"""
    Test class that defines test cases for the User class.
"""
def setUp(self):
"""
A method that runs before the user test method runs
"""
self.new_user = User("BernardOpiyo", "Bro@xyz2030", "brobernard.254@gmail.com")
def tearDown(self):
"""
A method that cleans after each test case is run
"""
User.user_list = []
def test__init__(self):
"""
TestCase to test if the object is initialized as required.
"""
self.assertEqual(self.new_user.user_name, "BernardOpiyo")
self.assertEqual(self.new_user.passward, "Bro@xyz2030")
self.assertEqual(self.new_user.email, "brobernard.254@gmail.com")
    def test_save_user(self):
        """
        Test case to check that the user object is saved into the user_list.
        """
self.new_user.save_user()
self.assertEqual(len(User.user_list), 1)
    def test_save_multiple_users(self):
"""
Test to check if we can save more than one user
"""
self.new_user.save_user()
test_user = User("Test", "user", "test@user.com")
test_user.save_user()
self.assertEqual(len(User.user_list), 2)
    def test_display_users(self):
        """
        Test that displaying users returns the user_list.
        """
self.assertEqual(User.dispalay_users(),User.user_list)
if __name__ == '__main__':
unittest.main()
| 23.896552
| 83
| 0.655844
|
7f5e0d6950bc62db0cd14c604f72f95143dd7275
| 5,309
|
py
|
Python
|
emb2emb_autoencoder.py
|
jczestochowska/emb2emb
|
92f05dcbff529c264ec7ff786a3e82b3a3e9f42f
|
[
"MIT"
] | null | null | null |
emb2emb_autoencoder.py
|
jczestochowska/emb2emb
|
92f05dcbff529c264ec7ff786a3e82b3a3e9f42f
|
[
"MIT"
] | 1
|
2021-11-30T12:52:43.000Z
|
2021-12-01T14:48:36.000Z
|
emb2emb_autoencoder.py
|
jczestochowska/emb2emb
|
92f05dcbff529c264ec7ff786a3e82b3a3e9f42f
|
[
"MIT"
] | 1
|
2021-11-24T14:02:00.000Z
|
2021-11-24T14:02:00.000Z
|
from torch.nn.utils.rnn import pad_sequence
from autoencoders.autoencoder import AutoEncoder
from autoencoders.rnn_encoder import RNNEncoder
from autoencoders.rnn_decoder import RNNDecoder
from emb2emb.encoding import Encoder, Decoder
from tokenizers import CharBPETokenizer, SentencePieceBPETokenizer
from emb2emb.utils import Namespace
import torch
import os
import json
import copy
HUGGINGFACE_TOKENIZERS = ["CharBPETokenizer", "SentencePieceBPETokenizer"]
def tokenize(s):
# TODO: more sophisticated tokenization
return s.split()
def get_tokenizer(tokenizer, location='bert-base-uncased'):
# TODO: do we need to pass more options to the file?
tok = eval(tokenizer)(vocab_file=location + '-vocab.json',
merges_file=location + '-merges.txt')
tok.add_special_tokens(["[PAD]", "<unk>", "<SOS>", "<EOS>"])
return tok
def get_autoencoder(config):
if os.path.exists(config["default_config"]):
with open(config["default_config"]) as f:
model_config_dict = json.load(f)
else:
model_config_dict = {}
with open(os.path.join(config["modeldir"], "config.json")) as f:
orig_model_config = json.load(f)
model_config_dict.update(orig_model_config)
model_config = Namespace()
model_config.__dict__.update(model_config_dict)
tokenizer = get_tokenizer(
model_config.tokenizer, model_config.tokenizer_location)
model_config.__dict__["vocab_size"] = tokenizer.get_vocab_size()
model_config.__dict__["sos_idx"] = tokenizer.token_to_id("<SOS>")
model_config.__dict__["eos_idx"] = tokenizer.token_to_id("<EOS>")
model_config.__dict__["unk_idx"] = tokenizer.token_to_id("<unk>")
model_config.__dict__["device"] = config["device"]
encoder_config, decoder_config = copy.deepcopy(
model_config), copy.deepcopy(model_config)
encoder_config.__dict__.update(model_config.__dict__[model_config.encoder])
encoder_config.__dict__["tokenizer"] = tokenizer
decoder_config.__dict__.update(model_config.__dict__[model_config.decoder])
    # Only the RNN encoder/decoder variants are handled here; any other value in
    # the config would leave `encoder`/`decoder` unbound and raise a NameError.
    if model_config.encoder == "RNNEncoder":
        encoder = RNNEncoder(encoder_config)
    if model_config.decoder == "RNNDecoder":
        decoder = RNNDecoder(decoder_config)
model = AutoEncoder(encoder, decoder, tokenizer, model_config)
checkpoint = torch.load(os.path.join(
config["modeldir"], model_config.model_file), map_location=config["device"])
model.load_state_dict(checkpoint["model_state_dict"])
return model
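# Illustrative only (added sketch): the config expected by get_autoencoder and
# the AEEncoder/AEDecoder classes below is a plain dict; the keys are inferred
# from the lookups above and the paths are hypothetical placeholders.
_EXAMPLE_AE_CONFIG = {
    "default_config": "config/default_autoencoder.json",  # defaults file (may be absent)
    "modeldir": "checkpoints/my_autoencoder",              # holds config.json + weights
    "device": "cpu",                                       # or "cuda"
}
# encoder = AEEncoder(_EXAMPLE_AE_CONFIG)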
class AEEncoder(Encoder):
def __init__(self, config):
super(AEEncoder, self).__init__(config)
self.device = config["device"]
self.model = get_autoencoder(config)
self.use_lookup = self.model.encoder.variational
def _prepare_batch(self, indexed, lengths):
X = pad_sequence([torch.tensor(index_list, device=self.device)
for index_list in indexed], batch_first=True, padding_value=0)
lengths, idx = torch.sort(torch.tensor(
lengths, device=self.device).long(), descending=True)
return X[idx], lengths, idx
def _undo_batch(self, encoded, sort_idx):
ret = [[] for _ in range(encoded.shape[0])]
for i, c in zip(sort_idx, range(encoded.shape[0])):
ret[i] = encoded[c]
return torch.stack(ret)
def encode(self, S_list):
indexed = [self.model.tokenizer.encode(
"<SOS>" + s + "<EOS>").ids for s in S_list]
lengths = [len(i) for i in indexed]
X, X_lens, sort_idx = self._prepare_batch(indexed, lengths)
encoded = self.model.encode(X, X_lens)
# Since _prepare_batch sorts by length, we will need to undo this.
encoded = self._undo_batch(encoded, sort_idx)
return encoded, lengths
class AEDecoder(Decoder):
def __init__(self, config):
super(AEDecoder, self).__init__()
self.device = config["device"]
self.model = get_autoencoder(config)
def _prepare_batch(self, indexed, lengths):
X = pad_sequence([torch.tensor(index_list, device=self.device)
for index_list in indexed], batch_first=True, padding_value=0)
#lengths, idx = torch.sort(torch.tensor(lengths, device=self.device).long(), descending=True)
# return X[idx], lengths, idx
lengths = torch.tensor(lengths, device=self.device).long()
return X, lengths
def _encode(self, S_list):
indexed = [self.model.tokenizer.encode(
"<SOS>" + s + "<EOS>").ids for s in S_list]
lengths = [len(i) for i in indexed]
X, X_lens = self._prepare_batch(indexed, lengths)
return X, X_lens
def predict(self, S_batch, target_batch=None, batch_lengths=None):
if self.training:
target_batch, target_length = self._encode(target_batch)
out = self.model.decode_training(
S_batch, target_batch, target_length)
return out, target_batch
else:
return self.model.decode(S_batch, beam_width=15, batch_lengths=batch_lengths)
def prediction_to_text(self, predictions):
predictions = [self.model.tokenizer.decode(p, skip_special_tokens=True) for p in predictions]
return predictions
| 38.194245
| 101
| 0.678847
|
89028b731acff1b6580f561630214fa7b02bcde9
| 2,289
|
py
|
Python
|
peerdid/core/jwk_okp.py
|
sicpa-dlab/peer-did-python
|
c63461860891d7c111abb6b24a51f23dad845a74
|
[
"Apache-2.0"
] | 3
|
2021-09-04T19:31:12.000Z
|
2022-01-28T12:51:27.000Z
|
peerdid/core/jwk_okp.py
|
sicpa-dlab/peer-did-python
|
c63461860891d7c111abb6b24a51f23dad845a74
|
[
"Apache-2.0"
] | 1
|
2021-09-03T07:23:12.000Z
|
2021-09-03T07:23:12.000Z
|
peerdid/core/jwk_okp.py
|
sicpa-dlab/peer-did-python
|
c63461860891d7c111abb6b24a51f23dad845a74
|
[
"Apache-2.0"
] | 3
|
2021-08-02T12:56:46.000Z
|
2021-09-28T09:18:37.000Z
|
import json
from peerdid.core.utils import urlsafe_b64encode, urlsafe_b64decode
from peerdid.errors import MalformedPeerDIDDocError
from peerdid.types import (
VerificationMethodTypePeerDID,
VerificationMethodTypeAgreement,
VerificationMethodTypeAuthentication,
VerificationMaterialPeerDID,
VerificationMaterialAuthentication,
VerificationMaterialAgreement,
)
def public_key_to_jwk_dict(
public_key: bytes, ver_method_type: VerificationMethodTypePeerDID
):
x = urlsafe_b64encode(public_key).decode("utf-8")
if ver_method_type == VerificationMethodTypeAgreement.JSON_WEB_KEY_2020:
crv = "X25519"
elif ver_method_type == VerificationMethodTypeAuthentication.JSON_WEB_KEY_2020:
crv = "Ed25519"
else:
raise ValueError("Unsupported JWK type: " + ver_method_type.value)
return {
"kty": "OKP",
"crv": crv,
"x": x,
}
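# Small illustrative sketch (added, not part of the original module): turning a
# raw 32-byte Ed25519 public key into the JWK dict shape built above. The key
# bytes here are dummy zeros, not a real key.
#
#     dummy_key = bytes(32)
#     public_key_to_jwk_dict(
#         dummy_key, VerificationMethodTypeAuthentication.JSON_WEB_KEY_2020)
#     # -> {"kty": "OKP", "crv": "Ed25519", "x": "<base64url of the key bytes>"}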
def jwk_key_to_bytes(ver_material: VerificationMaterialPeerDID) -> bytes:
jwk_dict = (
json.loads(ver_material.value)
if isinstance(ver_material.value, str)
else ver_material.value
)
if "crv" not in jwk_dict:
raise ValueError("Invalid JWK key - no 'crv' fields: " + ver_material.value)
if "x" not in jwk_dict:
raise ValueError("Invalid JWK key - no 'x' fields: " + ver_material.value)
crv = jwk_dict["crv"]
if (
isinstance(ver_material, VerificationMaterialAuthentication)
and crv != "Ed25519"
):
raise TypeError(
"Invalid JWK key type - authentication expected: " + ver_material.value
)
if isinstance(ver_material, VerificationMaterialAgreement) and crv != "X25519":
raise TypeError(
"Invalid JWK key type - key agreement expected: " + ver_material.value
)
value = jwk_dict["x"]
return urlsafe_b64decode(value.encode())
def get_verification_method_type(jwk_dict: dict) -> VerificationMethodTypePeerDID:
if "crv" not in jwk_dict:
raise MalformedPeerDIDDocError("No 'crv' field in JWK {}".format(jwk_dict))
crv = jwk_dict["crv"]
return (
VerificationMethodTypeAgreement.JSON_WEB_KEY_2020
if crv == "X25519"
else VerificationMethodTypeAuthentication.JSON_WEB_KEY_2020
)
| 31.791667
| 84
| 0.696374
|
801f10984fe3218839d8d1a2ae2be357b38020ce
| 15,593
|
py
|
Python
|
artemis/general/nested_structures.py
|
wouterkool/artemis
|
5ac3143d22ba2e7addc93396d059110104096233
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
artemis/general/nested_structures.py
|
wouterkool/artemis
|
5ac3143d22ba2e7addc93396d059110104096233
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
artemis/general/nested_structures.py
|
wouterkool/artemis
|
5ac3143d22ba2e7addc93396d059110104096233
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2018-11-25T12:48:03.000Z
|
2018-11-25T12:48:03.000Z
|
from collections import OrderedDict
import numpy as np
from six import string_types, next
from artemis.general.should_be_builtins import all_equal
__author__ = 'peter'
_immutible_types = (int, float, bool, type(None))+string_types
def flatten_struct(struct, primatives = (int, float, np.ndarray, type(None), bool)+string_types, custom_handlers = {},
break_into_objects = True, detect_duplicates = True, first_dict_is_namespace=False, memo = None):
"""
    Given some nested struct, return a list<*(str, primitive)>, where primitive
    is some kind of object that you don't break down any further, and str is a
    string representation of how you would access that property from the root object.
    :param struct: Something, anything.
    :param primatives: A list of classes that will not be broken into.
    :param custom_handlers: A dict<type:func> where func has the form data = func(obj). These
        will be called if the type of the struct is in the dict of custom handlers.
    :param break_into_objects: True if you want to break into objects to see what's inside.
    :return: list<*(str, primitive)>
"""
if memo is None:
memo = {}
if isinstance(struct, primatives):
return [(None, struct)]
if not isinstance(struct, _immutible_types):
if id(struct) in memo:
return [(None, memo[id(struct)])]
elif detect_duplicates:
memo[id(struct)] = 'Already Seen object at %s' % hex(id(struct))
if isinstance(struct, tuple(custom_handlers.keys())):
        # Pick the handler for the first matching type (dict.keys() is not indexable in py3).
        handler = next(h for t, h in custom_handlers.items() if isinstance(struct, t))
return [(None, handler(struct))]
elif isinstance(struct, dict):
return [
(("[{}]{}").format(("'{}'".format(key) if isinstance(key, string_types) else key), subkey if subkey is not None else ''), v) if not first_dict_is_namespace else
(("{}{}").format(key, subkey if subkey is not None else ''), v)
for key in (struct.keys() if isinstance(struct, OrderedDict) else sorted(struct.keys(), key = str))
for subkey, v in flatten_struct(struct[key], custom_handlers=custom_handlers, primatives=primatives, break_into_objects=break_into_objects, memo=memo, detect_duplicates=detect_duplicates)
]
elif isinstance(struct, (list, tuple)):
return [("[%s]%s" % (i, subkey if subkey is not None else ''), v)
for i, value in enumerate(struct)
for subkey, v in flatten_struct(value, custom_handlers=custom_handlers, primatives=primatives, break_into_objects=break_into_objects, memo=memo, detect_duplicates=detect_duplicates)
]
elif break_into_objects: # It's some kind of object, lets break it down.
return [(".%s%s" % (key, subkey if subkey is not None else ''), v)
for key in sorted(struct.__dict__.keys(), key = str)
for subkey, v in flatten_struct(struct.__dict__[key], custom_handlers=custom_handlers, primatives=primatives, break_into_objects=break_into_objects, memo=memo, detect_duplicates=detect_duplicates)
]
else:
return [(None, memo[id(struct)])]
_primitive_containers = (list, tuple, dict, set)
def _is_primitive_container(obj):
return isinstance(obj, _primitive_containers)
def get_meta_object(data_object, is_container_func = _is_primitive_container):
"""
Given an arbitrary data structure, return a "meta object" which is the same structure, except all non-container
objects are replaced by their types.
e.g.
get_meta_obj([1, 2, {'a':(3, 4), 'b':['hey', 'yeah']}, 'woo']) == [int, int, {'a':(int, int), 'b':[str, str]}, str]
:param data_object: A data object with arbitrary nested structure
:param is_container_func: A callback which returns True if an object is to be considered a container and False otherwise
:return:
"""
if is_container_func(data_object):
if isinstance(data_object, (list, tuple, set)):
return type(data_object)(get_meta_object(x, is_container_func=is_container_func) for x in data_object)
elif isinstance(data_object, dict):
return type(data_object)((k, get_meta_object(v, is_container_func=is_container_func)) for k, v in data_object.items())
else:
return type(data_object)
class NestedType(object):
"""
An object which represents the type of an arbitrarily nested data structure. It can be constructed directly
from a nested type descriptor, or indirectly using the NestedType.from_data(...) constructor.
For example
NestedType.from_data([1, 2, {'a':(3, 4.), 'b':'c'}]) == NestedType([int, int, {'a':(int, float), 'b':str}])
"""
def __init__(self, meta_object):
"""
:param meta_object: A nested type descriptor. See docstring and tests for examples.
"""
self.meta_object = meta_object
def is_type_for(self, data_object):
return get_meta_object(data_object)==self.meta_object
def check_type(self, data_object):
"""
Assert that the data_object has a format matching this NestedType. Throw a TypeError if it does not.
:param data_object:
:return:
"""
if not self.is_type_for(data_object): # note: we'd like to switch this to isnestedinstance
raise TypeError('The data object has type {}, which does not match this format: {}'.format(NestedType.from_data(data_object), self))
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.meta_object)
def __eq__(self, other):
return self.meta_object == other.meta_object
def get_leaves(self, data_object, check_types = True, is_container_func = _is_primitive_container):
"""
:param data_object: Given a nested object, get the "leaf" values in Depth-First Order
:return: A list of leaf values.
"""
if check_types:
self.check_type(data_object)
return get_leaf_values(data_object, is_container_func=is_container_func)
def expand_from_leaves(self, leaves, check_types = True, assert_fully_used=True, is_container_func = _is_primitive_container):
"""
Given an iterator of leaf values, fill the meta-object represented by this type.
        :param leaves: An iterable over leaf values
:param check_types: Assert that the data types match those of the original object
:param assert_fully_used: Assert that all the leaf values are used
:return: A nested object, filled with the leaf data, whose structure is represented in this NestedType instance.
"""
return _fill_meta_object(self.meta_object, (x for x in leaves), check_types=check_types, assert_fully_used=assert_fully_used, is_container_func=is_container_func)
@staticmethod
def from_data(data_object, is_container_func = _is_primitive_container):
"""
:param data_object: A nested data object
:param is_container_func: A callback which returns True if an object is to be considered a container and False otherwise
:return: A NestedType object
"""
return NestedType(get_meta_object(data_object, is_container_func=is_container_func))
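# Added round-trip sketch (not in the original module): NestedType captures the
# structure, get_leaves flattens the data depth-first, and expand_from_leaves
# rebuilds an object of the same shape from a new sequence of leaves.
#
#     spec = NestedType.from_data([1, {'a': (2.0, 'x')}])
#     spec.get_leaves([1, {'a': (2.0, 'x')}])      # -> [1, 2.0, 'x']
#     spec.expand_from_leaves([7, 3.5, 'y'])       # -> [7, {'a': (3.5, 'y')}]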
def isnestedinstance(data, meta_obj):
"""
    Check whether the data matches the structure described by meta_obj.
    (Not implemented yet.)
    :param data: A nested data object.
    :param meta_obj: A nested type descriptor (see NestedType).
    :return: True if the data matches the meta object structure.
"""
raise NotImplementedError()
def get_leaf_values(data_object, is_container_func = _is_primitive_container):
"""
Collect leaf values of a nested data_obj in Depth-First order.
e.g.
    >>> get_leaf_values([6]+[{'x': 3, 'y': [i, 'aaa']} for i in range(4)])
[6, 3, 0, 'aaa', 3, 1, 'aaa', 3, 2, 'aaa', 3, 3, 'aaa']
Caution: If your data contains dicts, you may not get the same order of results when you call this function with
different dict objects containing the same data. Python only guarantees that a given dict will always iterate in
the same order so long as it is not modified. See https://docs.python.org/2/library/stdtypes.html#dict.items
:param data_object: An arbitrary nested data object
:param is_container_func: A callback which returns True if an object is to be considered a container and False otherwise
:return: A list of leaf values.
"""
leaf_values = []
if is_container_func(data_object):
if isinstance(data_object, (list, tuple)):
leaf_values += [val for x in data_object for val in get_leaf_values(x, is_container_func=is_container_func)]
elif isinstance(data_object, OrderedDict):
leaf_values += [val for k, x in data_object.items() for val in get_leaf_values(x, is_container_func=is_container_func)]
elif isinstance(data_object, dict):
leaf_values += [val for k in sorted(data_object.keys(), key = str) for val in get_leaf_values(data_object[k], is_container_func=is_container_func)]
else:
raise Exception('Have no way to consistently extract leaf values from a {}'.format(data_object))
return leaf_values
else:
return [data_object]
def _fill_meta_object(meta_object, data_iteratable, assert_fully_used = True, check_types = True, is_container_func = _is_primitive_container):
"""
Fill the data from the iterable into the meta_object.
    :param meta_object: A nested type descriptor. See NestedType init
:param data_iteratable: The iterable data object
:param assert_fully_used: Assert that we actually get through all the items in the iterable
:param is_container_func: A callback which returns True if an object is to be considered a container and False otherwise
:return: The filled object
"""
try:
if is_container_func(meta_object):
if isinstance(meta_object, (list, tuple, set)):
filled_object = type(meta_object)(_fill_meta_object(x, data_iteratable, assert_fully_used=False, check_types=check_types, is_container_func=is_container_func) for x in meta_object)
elif isinstance(meta_object, OrderedDict):
filled_object = type(meta_object)((k, _fill_meta_object(val, data_iteratable, assert_fully_used=False, check_types=check_types, is_container_func=is_container_func)) for k, val in meta_object.items())
elif isinstance(meta_object, dict):
filled_object = type(meta_object)((k, _fill_meta_object(meta_object[k], data_iteratable, assert_fully_used=False, check_types=check_types, is_container_func=is_container_func)) for k in sorted(meta_object.keys(), key=str))
else:
raise Exception('Cannot handle container type: "{}"'.format(type(meta_object)))
else:
next_data = next(data_iteratable)
if check_types and meta_object is not type(next_data):
raise TypeError('The type of the data object: {} did not match type from the meta object: {}'.format(type(next_data), meta_object))
filled_object = next_data
except StopIteration:
raise TypeError('The data iterable you were going through ran out before the object {} could be filled.'.format(meta_object))
if assert_fully_used:
try:
next(data_iteratable)
raise TypeError('It appears that the data object you were using to fill your meta object had more data than could fit.')
except StopIteration:
pass
return filled_object
def nested_map(func, *nested_objs, **kwargs):
"""
An equivalent of pythons built-in map, but for nested objects. This function crawls the object and applies func
to the leaf nodes.
:param func: A function of the form new_leaf_val = func(old_leaf_val)
:param nested_obj: A nested object e.g. [1, 2, {'a': 3, 'b': (3, 4)}, 5]
:param check_types: Assert that the new leaf types match the old leaf types (False by default)
:param is_container_func: A callback which returns True if an object is to be considered a container and False otherwise
    :return: A nested object with the same structure, but func applied to every value.
"""
is_container_func = kwargs['is_container_func'] if 'is_container_func' in kwargs else _is_primitive_container
check_types = kwargs['check_types'] if 'check_types' in kwargs else False
    assert len(nested_objs)>0, 'nested_map requires at least one nested object'
    assert callable(func), 'func must be callable, taking one argument per nested object.'
nested_types = [NestedType.from_data(nested_obj, is_container_func=is_container_func) for nested_obj in nested_objs]
assert all_equal(nested_types), "The nested objects you provided had different data structures:\n{}".format('\n'.join(str(s) for s in nested_types))
leaf_values = zip(*[nested_type.get_leaves(nested_obj, is_container_func=is_container_func, check_types=check_types) for nested_type, nested_obj in zip(nested_types, nested_objs)])
new_leaf_values = [func(*v) for v in leaf_values]
new_nested_obj = nested_types[0].expand_from_leaves(new_leaf_values, check_types=check_types, is_container_func=is_container_func)
return new_nested_obj
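# Added usage sketch (not in the original module): nested_map applies a function
# to every leaf while preserving the container structure.
#
#     nested_map(lambda x: x * 2, [1, {'a': 2, 'b': (3, 4)}])
#     # -> [2, {'a': 4, 'b': (6, 8)}]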
def get_nested_value(data_object, key_chain):
if len(key_chain)>0:
return get_nested_value(data_object[key_chain[0]], key_chain=key_chain[1:])
else:
return data_object
class ExpandingDict(dict):
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = ExpandingDict()
return dict.__getitem__(self, key)
class ExpandingOrderedDict(OrderedDict):
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
self[key] = ExpandingDict()
return OrderedDict.__getitem__(self, key)
def expand_struct(struct):
expanded_struct = ExpandingDict()
for k in struct.keys():
exec('expanded_struct%s = struct["%s"]' % (k, k))
return expanded_struct
def seqstruct_to_structseq(seqstruct, as_arrays=False):
"""
Turn a sequence of identically-structured nested objects into a nested object of sequences.
:param seqstruct: A sequence (list or tuple) of nested objects with similar format
:param as_arrays: Turn the output sequences into numpy arrays
:return: A nested object with sequences
For example, if you go:
signal_seqs = seqstruct_to_structseq(seq_signals)
Then
frame_number = 5
seq_signals[frame_number]['inputs']['camera'] == signal_seqs['inputs']['camera'][frame_number]
"""
if len(seqstruct)==0:
return []
nested_type = NestedType.from_data(seqstruct[0])
leaf_data = [nested_type.get_leaves(s) for s in seqstruct]
batch_leaf_data = [np.array(d) for d in zip(*leaf_data)] if as_arrays else zip(*leaf_data)
structseq = nested_type.expand_from_leaves(leaves = batch_leaf_data, check_types=False)
return structseq
def structseq_to_seqstruct(structseq):
"""
Turn a nested object of sequences into a sequence of identically-structured nested objects.
This is the inverse of seqstruct_to_structseq
:param structseq: A nested object with sequences
:return: A sequence (list or tuple) of nested objects with similar format
"""
nested_type = NestedType.from_data(structseq)
leaf_data = nested_type.get_leaves(structseq, check_types=False)
sequence = zip(*leaf_data)
seqstruct = [nested_type.expand_from_leaves(s, check_types=False) for s in sequence]
return seqstruct
| 46.825826
| 238
| 0.698903
|
8a4ac8e242fa7132266ede9e31a7262cd69c8de5
| 8,703
|
py
|
Python
|
1.0.2/tools/makeboards.py
|
UM-battery-lab/Longan-RP2040
|
81f2f50eac4149cc608acf9662f64641095d49c6
|
[
"MIT"
] | null | null | null |
1.0.2/tools/makeboards.py
|
UM-battery-lab/Longan-RP2040
|
81f2f50eac4149cc608acf9662f64641095d49c6
|
[
"MIT"
] | null | null | null |
1.0.2/tools/makeboards.py
|
UM-battery-lab/Longan-RP2040
|
81f2f50eac4149cc608acf9662f64641095d49c6
|
[
"MIT"
] | 1
|
2022-02-15T16:22:47.000Z
|
2022-02-15T16:22:47.000Z
|
#!/usr/bin/env python3
def BuildFlashMenu(name, flashsize, fssizelist):
for fssize in fssizelist:
if fssize == 0:
fssizename = "no FS"
elif fssize < 1024 * 1024:
fssizename = "Sketch: %dKB, FS: %dKB" % ((flashsize - fssize) / 1024, fssize / 1024)
else:
fssizename = "Sketch: %dMB, FS: %dMB" % ((flashsize - fssize) / (1024 * 1024), fssize / (1024 * 1024))
mn="%d_%d" % (flashsize, fssize)
print("%s.menu.flash.%s=%dMB (%s)" % (name, mn, flashsize / (1024 * 1024), fssizename))
print("%s.menu.flash.%s.upload.maximum_size=%d" % (name, mn, flashsize - 4096 - fssize))
print("%s.menu.flash.%s.build.flash_length=%d" % (name, mn, flashsize - 4096 - fssize))
print("%s.menu.flash.%s.build.eeprom_start=%d" % (name, mn, int("0x10000000",0) + flashsize - 4096))
print("%s.menu.flash.%s.build.fs_start=%d" % (name, mn, int("0x10000000",0) + flashsize - 4096 - fssize))
print("%s.menu.flash.%s.build.fs_end=%d" % (name, mn, int("0x10000000",0) + flashsize - 4096))
def BuildDebugPort(name):
print("%s.menu.dbgport.Disabled=Disabled" % (name))
print("%s.menu.dbgport.Disabled.build.debug_port=" % (name))
for p in ["Serial", "Serial1", "Serial2"]:
print("%s.menu.dbgport.%s=%s" % (name, p, p))
print("%s.menu.dbgport.%s.build.debug_port=-DDEBUG_RP2040_PORT=%s" % (name, p, p))
def BuildDebugLevel(name):
for l in [ ("None", ""), ("Core", "-DDEBUG_RP2040_CORE"), ("SPI", "-DDEBUG_RP2040_SPI"), ("Wire", "-DDEBUG_RP2040_WIRE"),
("All", "-DDEBUG_RP2040_WIRE -DDEBUG_RP2040_SPI -DDEBUG_RP2040_CORE"), ("NDEBUG", "-DNDEBUG") ]:
print("%s.menu.dbglvl.%s=%s" % (name, l[0], l[0]))
print("%s.menu.dbglvl.%s.build.debug_level=%s" % (name, l[0], l[1]))
def BuildFreq(name):
for f in [ 125, 50, 100, 133, 150, 175, 200, 225, 250, 275, 300]:
warn = ""
if f > 133: warn = " (Overclock)"
print("%s.menu.freq.%s=%s MHz%s" % (name, f, f, warn))
print("%s.menu.freq.%s.build.f_cpu=%dL" % (name, f, f * 1000000))
def BuildBoot(name):
for l in [ ("Generic SPI /2", "boot2_generic_03h_2_padded_checksum"), ("Generic SPI /4", "boot2_generic_03h_4_padded_checksum"),
("IS25LP080 QSPI /2", "boot2_is25lp080_2_padded_checksum"), ("IS25LP080 QSPI /4", "boot2_is25lp080_4_padded_checksum"),
("W25Q080 QSPI /2", "boot2_w25q080_2_padded_checksum"), ("W25Q080 QSPI /4", "boot2_w25q080_4_padded_checksum"),
("W25X10CL QSPI /2", "boot2_w25x10cl_2_padded_checksum"), ("W25X10CL QSPI /4", "boot2_w25x10cl_4_padded_checksum") ]:
print("%s.menu.boot2.%s=%s" % (name, l[1], l[0]))
print("%s.menu.boot2.%s.build.boot2=%s" % (name, l[1], l[1]))
def BuildUSBStack(name):
print("%s.menu.usbstack.picosdk=Pico SDK" % (name))
print('%s.menu.usbstack.picosdk.build.usbstack_flags="-I{runtime.platform.path}/tools/libpico"' % (name))
print("%s.menu.usbstack.tinyusb=Adafruit TinyUSB" % (name))
print('%s.menu.usbstack.tinyusb.build.usbstack_flags=-DUSE_TINYUSB "-I{runtime.platform.path}/libraries/Adafruit_TinyUSB_Arduino/src/arduino"' % (name))
def BuildWithoutUSBStack(name):
print("%s.menu.usbstack.nousb=No USB" % (name))
print('%s.menu.usbstack.nousb.build.usbstack_flags="-DNO_USB -DDISABLE_USB_SERIAL -I{runtime.platform.path}/tools/libpico"' % (name))
def BuildHeader(name, vendor_name, product_name, vidtouse, pidtouse, vid, pid, boarddefine, variant, uploadtool, flashsize, ramsize, boot2):
prettyname = vendor_name + " " + product_name
print()
print("# -----------------------------------")
print("# %s" % (prettyname))
print("# -----------------------------------")
print("%s.name=%s" % (name, prettyname))
print("%s.vid.0=%s" % (name, vidtouse))
print("%s.pid.0=%s" % (name, pidtouse))
print("%s.build.usbpid=-DSERIALUSB_PID=%s" % (name, pid))
print("%s.build.board=%s" % (name, boarddefine))
print("%s.build.mcu=cortex-m0plus" % (name))
print("%s.build.variant=%s" % (name, variant))
print("%s.upload.tool=%s" % (name, uploadtool))
print("%s.upload.maximum_size=%d" % (name, flashsize))
print("%s.upload.maximum_data_size=%d" % (name, ramsize))
print("%s.upload.wait_for_upload_port=true" % (name))
print("%s.upload.erase_cmd=" % (name))
print("%s.serial.disableDTR=false" % (name))
print("%s.serial.disableRTS=false" % (name))
print("%s.build.f_cpu=125000000" % (name))
print("%s.build.led=" % (name))
print("%s.build.core=rp2040" % (name))
print("%s.build.mcu=rp2040" % (name))
print("%s.build.ldscript=memmap_default.ld" % (name))
print("%s.build.ram_length=%dk" % (name, ramsize / 1024))
print("%s.build.boot2=%s" % (name, boot2))
print("%s.build.vid=%s" % (name, vid))
print("%s.build.pid=%s" % (name, pid))
print('%s.build.usb_manufacturer="%s"' % (name, vendor_name))
print('%s.build.usb_product="%s"' % (name, product_name))
def BuildGlobalMenuList():
print("menu.BoardModel=Model")
print("menu.flash=Flash Size")
print("menu.freq=CPU Speed")
print("menu.dbgport=Debug Port")
print("menu.dbglvl=Debug Level")
print("menu.boot2=Boot Stage 2")
print("menu.usbstack=USB Stack")
def MakeBoard(name, vendor_name, product_name, vid, pid, boarddefine, flashsizemb, boot2):
for a, b, c in [ ["", "", "uf2conv"], ["picoprobe", " (Picoprobe)", "picoprobe"], ["picodebug", " (pico-debug)", "picodebug"]]:
n = name + a
p = product_name + b
fssizelist = [ 0, 64 * 1024, 128 * 1024, 256 * 1024, 512 * 1024 ]
for i in range(1, flashsizemb):
fssizelist.append(i * 1024 * 1024)
        vidtouse = vid
        ramsizekb = 256
if a == "picoprobe":
pidtouse = '0x0004'
elif a == "picodebug":
vidtouse = '0x1209'
pidtouse = '0x2488'
            ramsizekb = 240
else:
pidtouse = pid
BuildHeader(n, vendor_name, p, vidtouse, pidtouse, vid, pid, boarddefine, name, c, flashsizemb * 1024 * 1024, ramsizekb * 1024, boot2)
if name == "generic":
BuildFlashMenu(n, 2*1024*1024, [0, 1*1024*1024])
BuildFlashMenu(n, 4*1024*1024, [0, 2*1024*1024])
BuildFlashMenu(n, 8*1024*1024, [0, 4*1024*1024])
BuildFlashMenu(n, 16*1024*1024, [0, 8*1024*1024])
else:
BuildFlashMenu(n, flashsizemb * 1024 * 1024, fssizelist)
BuildFreq(n)
BuildDebugPort(n)
BuildDebugLevel(n)
if a == "picodebug":
BuildWithoutUSBStack(n)
else:
BuildUSBStack(n)
if name == "generic":
BuildBoot(n)
BuildGlobalMenuList()
MakeBoard("rpipico", "Raspberry Pi", "Pico", "0x2e8a", "0x000a", "RASPBERRY_PI_PICO", 2, "boot2_w25q080_2_padded_checksum")
MakeBoard("adafruit_feather", "Adafruit", "Feather RP2040", "0x239a", "0x80f1", "ADAFRUIT_FEATHER_RP2040", 8, "boot2_w25x10cl_4_padded_checksum")
MakeBoard("adafruit_itsybitsy", "Adafruit", "ItsyBitsy RP2040", "0x239a", "0x80fd", "ADAFRUIT_ITSYBITSY_RP2040", 8, "boot2_w25q080_2_padded_checksum")
MakeBoard("adafruit_qtpy", "Adafruit", "QT Py RP2040", "0x239a", "0x80f7", "ADAFRUIT_QTPY_RP2040", 8, "boot2_generic_03h_4_padded_checksum")
MakeBoard("adafruit_stemmafriend", "Adafruit", "STEMMA Friend RP2040", "0x239a", "0x80e3", "ADAFRUIT_STEMMAFRIEND_RP2040", 8, "boot2_w25q080_2_padded_checksum")
MakeBoard("adafruit_trinkeyrp2040qt", "Adafruit", "Trinkey RP2040 QT", "0x239a", "0x8109", "ADAFRUIT_TRINKEYQT_RP2040", 8, "boot2_w25q080_2_padded_checksum")
MakeBoard("adafruit_macropad2040", "Adafruit", "MacroPad RP2040", "0x239a", "0x8107", "ADAFRUIT_MACROPAD_RP2040", 8, "boot2_w25q080_2_padded_checksum")
MakeBoard("arduino_nano_connect", "Arduino", "Nano RP2040 Connect", "0x2341", "0x0058", "ARDUINO_NANO_RP2040_CONNECT", 16, "boot2_w25q080_2_padded_checksum")
MakeBoard("sparkfun_promicrorp2040", "SparkFun", "ProMicro RP2040", "0x1b4f", "0x0026", "SPARKFUN_PROMICRO_RP2040", 16, "boot2_generic_03h_4_padded_checksum")
MakeBoard("generic", "Generic", "RP2040", "0x2e8a", "0xf00a", "GENERIC_RP2040", 16, "boot2_generic_03h_4_padded_checksum")
MakeBoard("challenger_2040_wifi", "iLabs", "Challenger 2040 WiFi", "0x2e8a", "0x1006", "CHALLENGER_2040_WIFI_RP2040", 8, "boot2_w25q080_2_padded_checksum")
MakeBoard("challenger_2040_lte", "iLabs", "Challenger 2040 LTE", "0x2e8a", "0x100b", "CHALLENGER_2040_LTE_RP2040", 8, "boot2_w25q080_2_padded_checksum")
MakeBoard("melopero_shake_rp2040", "Melopero", "Shake RP2040", "0x2e8a", "0x1005", "MELOPERO_SHAKE_RP2040", 16, "boot2_w25q080_2_padded_checksum")
| 58.409396
| 160
| 0.638745
|
82620f7abeac04f703ce1ae528ae1d860cd3bfa9
| 7,418
|
py
|
Python
|
components/ADC_LCD/lcd.py
|
minhan74/MIS-Locker
|
ab7e90000d566e818b4f44bd27165ed45a8f653f
|
[
"Apache-2.0",
"MIT"
] | 2
|
2020-07-29T13:38:54.000Z
|
2021-07-24T13:54:48.000Z
|
components/ADC_LCD/lcd.py
|
minhan74/MIS-Locker
|
ab7e90000d566e818b4f44bd27165ed45a8f653f
|
[
"Apache-2.0",
"MIT"
] | 4
|
2020-02-27T09:53:34.000Z
|
2020-02-27T09:54:35.000Z
|
components/ADC_LCD/lcd.py
|
minhan74/MIS-Locker
|
ab7e90000d566e818b4f44bd27165ed45a8f653f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""------------------------------------------------------------*-
LCD application module for Raspberry Pi
Tested on: Raspberry Pi 3 B+
(c) Minh-An Dao 2019
version 1.00 - 02/10/2019
--------------------------------------------------------------
*
*
--------------------------------------------------------------"""
from LCD.LCD_I2C import LCD_I2C
# ---------------------------- Private Parameters:
# -----Address and Screen parameter:
LCD_ADDRESS = 0x27
LCD_WIDTH = 20
LCD_HEIGHT = 4
LCD = None
# ------------------------------ Basic functions ------------------------------
def begin():
global LCD
LCD = LCD_I2C(LCD_ADDRESS, LCD_WIDTH, LCD_HEIGHT)
LCD.backlight()
LCD.clear()
LCD.setCursor(2, 0) # row, column
LCD.write("SYSTEM STARTING")
LCD.setCursor(7, 2) # row, column
LCD.write("....")
def pointerPos(options, pointer):
if options == 3:
        for pos in range(1, options+1):  # go from 1 to options (inclusive)
if pos == pointer:
LCD.setCursor(0, pos) # row, column
LCD.write(">")
else:
LCD.setCursor(0, pos) # row, column
LCD.write(" ")
elif options == 2:
        for pos in range(1, options+1):  # go from 1 to options (inclusive)
if pos == pointer:
LCD.setCursor(0, pos+1) # row, column
LCD.write(">")
else:
LCD.setCursor(0, pos+1) # row, column
LCD.write(" ")
def clear():
LCD.clear()
# ------------------------------ User level interfaces ------------------------------
def waitPage(): # user can use RFID or their fingerprint
LCD.setCursor(4, 0) # row, column
LCD.write("SYSTEM READY")
LCD.setCursor(5, 2) # row, column
LCD.write("waiting...")
def welcomePage(name, mssv, locker_num):
LCD.setCursor(3, 0) # row, column
LCD.write("WELCOME TO MIS!")
LCD.setCursor(0, 1) # row, column
LCD.write("Name: " + name)
LCD.setCursor(0, 2) # row, column
LCD.write("MSSV: " + mssv)
LCD.setCursor(0, 3) # row, column
LCD.write("Your Locker is " + str(locker_num))
def addExtraInfoPage(name, mssv):
LCD.setCursor(0, 0) # row, column
LCD.write("Name: " + name)
LCD.setCursor(0, 1) # row, column
LCD.write("MSSV: " + mssv)
LCD.setCursor(2, 2) # row, column
LCD.write("New RFID")
LCD.setCursor(2, 3) # row, column
LCD.write("New fingerprint")
def welcomeTempPage(locker_num):
LCD.setCursor(3, 0) # row, column
LCD.write("TEMPORARY USER")
LCD.setCursor(0, 1) # row, column
LCD.write("This is a one-time")
LCD.setCursor(0, 2) # row, column
LCD.write("user only.")
LCD.setCursor(0, 3) # row, column
LCD.write("Your Locker is " + str(locker_num))
def unknownIDPage():
LCD.setCursor(1, 0) # row, column
LCD.write("NOT RECOGNISED ID!")
LCD.setCursor(2, 2) # row, column
LCD.write("Add New ID")
LCD.setCursor(2, 3) # row, column
LCD.write("Add Existed ID")
def addFingerPage(number):
LCD.setCursor(2, 0) # row, column
LCD.write("ADD FINGERPRINT")
LCD.setCursor(2, 2) # row, column
LCD.write("Put your finger")
LCD.setCursor(2, 3) # row, column
LCD.write("in position " + str(number))
def addFingerSuccessPage():
LCD.setCursor(2, 0) # row, column
LCD.write("ADD FINGERPRINT")
LCD.setCursor(1, 1) # row, column
LCD.write("Fingerprint added!")
LCD.setCursor(0, 3) # row, column
LCD.write("Press any to return")
def addFingerFailPage():
LCD.setCursor(2, 0) # row, column
LCD.write("ADD FINGERPRINT")
LCD.setCursor(6, 1) # row, column
LCD.write("Failed!")
LCD.setCursor(2, 2) # row, column
LCD.write("Retry")
LCD.setCursor(2, 3) # row, column
LCD.write("Cancel")
def addRFIDPage():
LCD.setCursor(6, 0) # row, column
LCD.write("ADD RFID")
LCD.setCursor(3, 2) # row, column
LCD.write("Put your RFID")
LCD.setCursor(3, 3) # row, column
LCD.write("in the reader")
def addRFIDSuccessPage():
LCD.setCursor(6, 0) # row, column
LCD.write("ADD RFID")
LCD.setCursor(4, 1) # row, column
LCD.write("RFID added!")
LCD.setCursor(0, 3) # row, column
LCD.write("Press any to return")
def addRFIDFailPage():
LCD.setCursor(6, 0) # row, column
LCD.write("ADD RFID")
LCD.setCursor(6, 1) # row, column
LCD.write("Failed!")
LCD.setCursor(2, 2) # row, column
LCD.write("Retry")
LCD.setCursor(2, 3) # row, column
LCD.write("Cancel")
def addInfoPage():
LCD.setCursor(5, 0) # row, column
LCD.write("ADD NEW INFO")
LCD.setCursor(0, 1) # row, column
LCD.write("Connect to MIS-CTU")
LCD.setCursor(0, 2) # row, column
LCD.write("wifi & open browser")
LCD.setCursor(0, 3) # row, column
LCD.write("to: mis-locker.ctu")
# ------------------------------ Admin level interfaces ------------------------------
def mainDatabasePage():
LCD.setCursor(0, 0) # row, column
LCD.write("ADMIN MENU:")
LCD.setCursor(2, 1) # row, column
LCD.write("1. Export Database") # export will export everything
LCD.setCursor(2, 2) # row, column
LCD.write("2. Import Database") # import will have 2 options: append and override
LCD.setCursor(2, 3) # row, column
LCD.write("3. Clear Database") #
def exportSuccessPage():
LCD.setCursor(2, 0) # row, column
LCD.write("EXPORT DATABASE")
LCD.setCursor(7, 1) # row, column
LCD.write("Done!")
LCD.setCursor(0, 3) # row, column
LCD.write("Press any to return")
def exportFailPage():
LCD.setCursor(2, 0) # row, column
LCD.write("EXPORT DATABASE")
LCD.setCursor(6, 1) # row, column
LCD.write("Failed!")
LCD.setCursor(2, 2) # row, column
LCD.write("Retry")
LCD.setCursor(2, 3) # row, column
LCD.write("Cancel")
def importPage():
LCD.setCursor(2, 0) # row, column
LCD.write("IMPORT DATABASE")
LCD.setCursor(2, 2) # row, column
LCD.write("1. Append database")
LCD.setCursor(2, 3) # row, column
LCD.write("2. New database")
def importSuccessPage():
LCD.setCursor(2, 0) # row, column
LCD.write("IMPORT DATABASE")
LCD.setCursor(7, 1) # row, column
LCD.write("Done!")
LCD.setCursor(0, 3) # row, column
LCD.write("Press any to return")
def importFailPage():
LCD.setCursor(2, 0) # row, column
LCD.write("IMPORT DATABASE")
LCD.setCursor(6, 1) # row, column
LCD.write("Failed!")
LCD.setCursor(2, 2) # row, column
LCD.write("Retry")
LCD.setCursor(2, 3) # row, column
LCD.write("Cancel")
def mainLockerPage():
LCD.setCursor(0, 0) # row, column
LCD.write("ADMIN MENU:")
LCD.setCursor(2, 1) # row, column
LCD.write("4. Info Locker") # check any locker that has information in
def infoLockerPage(name, mssv):
LCD.setCursor(4, 0) # row, column
LCD.write("INFO LOCKER")
LCD.setCursor(0, 1) # row, column
LCD.write("Name: " + name)
LCD.setCursor(0, 2) # row, column
LCD.write("MSSV: " + mssv)
LCD.setCursor(0, 3) # row, column
LCD.write("Unlock?")
def infoLockerTempPage():
LCD.setCursor(4, 0) # row, column
LCD.write("INFO LOCKER")
LCD.setCursor(0, 1) # row, column
LCD.write("Temporary User")
LCD.setCursor(0, 3) # row, column
LCD.write("Unlock?")
| 28.530769
| 86
| 0.570504
|
07b5371759e64d152674ef32a773f91f76c6f280
| 317
|
py
|
Python
|
src/Case1.py
|
EstudoAAS/linear-algebra-refresher-course
|
cf5ba417e8bbbb8a6683161e4cd7397ebb384c00
|
[
"Apache-2.0"
] | null | null | null |
src/Case1.py
|
EstudoAAS/linear-algebra-refresher-course
|
cf5ba417e8bbbb8a6683161e4cd7397ebb384c00
|
[
"Apache-2.0"
] | null | null | null |
src/Case1.py
|
EstudoAAS/linear-algebra-refresher-course
|
cf5ba417e8bbbb8a6683161e4cd7397ebb384c00
|
[
"Apache-2.0"
] | null | null | null |
import vector
def main():
vetor1 = vector.Vector([8.218,-9.341])
print(vetor1.plus(vector.Vector([-1.129,2.111])))
vetor2 = vector.Vector([7.119,8.215])
print(vetor2.minus(vector.Vector([-8.223,0.878])))
vetor3 = vector.Vector([1.671,-1.012,-0.318])
print(vetor3.times_scalar(7.41))
if __name__ == '__main__':
    main()
| 24.384615
| 54
| 0.637224
|
3a3c6de7224f47c6be61b2e0246429ff72ed86d3
| 9,548
|
py
|
Python
|
charmhelpers/contrib/openstack/cert_utils.py
|
cjwatson/charm-helpers
|
386a819fb93a3333807c947d8ff5a7ecc348ade1
|
[
"Apache-2.0"
] | null | null | null |
charmhelpers/contrib/openstack/cert_utils.py
|
cjwatson/charm-helpers
|
386a819fb93a3333807c947d8ff5a7ecc348ade1
|
[
"Apache-2.0"
] | null | null | null |
charmhelpers/contrib/openstack/cert_utils.py
|
cjwatson/charm-helpers
|
386a819fb93a3333807c947d8ff5a7ecc348ade1
|
[
"Apache-2.0"
] | 1
|
2019-01-28T03:42:18.000Z
|
2019-01-28T03:42:18.000Z
|
# Copyright 2014-2018 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common python helper functions used for OpenStack charm certificates.
import os
import json
from charmhelpers.contrib.network.ip import (
get_hostname,
resolve_network_cidr,
)
from charmhelpers.core.hookenv import (
local_unit,
network_get_primary_address,
config,
related_units,
relation_get,
relation_ids,
unit_get,
NoNetworkBinding,
log,
WARNING,
)
from charmhelpers.contrib.openstack.ip import (
ADMIN,
resolve_address,
get_vip_in_network,
INTERNAL,
PUBLIC,
ADDRESS_MAP)
from charmhelpers.core.host import (
mkdir,
write_file,
)
from charmhelpers.contrib.hahelpers.apache import (
install_ca_cert
)
class CertRequest(object):
"""Create a request for certificates to be generated
"""
def __init__(self, json_encode=True):
self.entries = []
self.hostname_entry = None
self.json_encode = json_encode
def add_entry(self, net_type, cn, addresses):
"""Add a request to the batch
        :param net_type: str network space name the request is for
:param cn: str Canonical Name for certificate
:param addresses: [] List of addresses to be used as SANs
"""
self.entries.append({
'cn': cn,
'addresses': addresses})
def add_hostname_cn(self):
"""Add a request for the hostname of the machine"""
ip = unit_get('private-address')
addresses = [ip]
        # If a vip is being used without os-hostname config or
        # network spaces then we need to ensure the local unit's
        # cert has the appropriate vip in the SAN list
vip = get_vip_in_network(resolve_network_cidr(ip))
if vip:
addresses.append(vip)
self.hostname_entry = {
'cn': get_hostname(ip),
'addresses': addresses}
def add_hostname_cn_ip(self, addresses):
"""Add an address to the SAN list for the hostname request
        :param addresses: [] List of addresses to be added
"""
for addr in addresses:
if addr not in self.hostname_entry['addresses']:
self.hostname_entry['addresses'].append(addr)
def get_request(self):
"""Generate request from the batched up entries
"""
if self.hostname_entry:
self.entries.append(self.hostname_entry)
request = {}
for entry in self.entries:
sans = sorted(list(set(entry['addresses'])))
request[entry['cn']] = {'sans': sans}
if self.json_encode:
return {'cert_requests': json.dumps(request, sort_keys=True)}
else:
return {'cert_requests': request}
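# Illustrative output shape (added note; the CN and addresses are hypothetical):
# with json_encode=True, get_request() returns something like
#     {'cert_requests': '{"juju-unit-0.maas": {"sans": ["10.0.0.5", "10.0.0.100"]}}'}
# i.e. a single relation key whose value maps each CN to its sorted SAN list.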
def get_certificate_request(json_encode=True):
"""Generate a certificatee requests based on the network confioguration
"""
req = CertRequest(json_encode=json_encode)
req.add_hostname_cn()
# Add os-hostname entries
for net_type in [INTERNAL, ADMIN, PUBLIC]:
net_config = config(ADDRESS_MAP[net_type]['override'])
try:
net_addr = resolve_address(endpoint_type=net_type)
ip = network_get_primary_address(
ADDRESS_MAP[net_type]['binding'])
addresses = [net_addr, ip]
vip = get_vip_in_network(resolve_network_cidr(ip))
if vip:
addresses.append(vip)
if net_config:
req.add_entry(
net_type,
net_config,
addresses)
else:
# There is network address with no corresponding hostname.
# Add the ip to the hostname cert to allow for this.
req.add_hostname_cn_ip(addresses)
except NoNetworkBinding:
log("Skipping request for certificate for ip in {} space, no "
"local address found".format(net_type), WARNING)
return req.get_request()
def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
"""Create symlinks for SAN records
:param ssl_dir: str Directory to create symlinks in
:param custom_hostname_link: str Additional link to be created
"""
hostname = get_hostname(unit_get('private-address'))
hostname_cert = os.path.join(
ssl_dir,
'cert_{}'.format(hostname))
hostname_key = os.path.join(
ssl_dir,
'key_{}'.format(hostname))
# Add links to hostname cert, used if os-hostname vars not set
for net_type in [INTERNAL, ADMIN, PUBLIC]:
try:
addr = resolve_address(endpoint_type=net_type)
cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
key = os.path.join(ssl_dir, 'key_{}'.format(addr))
if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
os.symlink(hostname_cert, cert)
os.symlink(hostname_key, key)
except NoNetworkBinding:
log("Skipping creating cert symlink for ip in {} space, no "
"local address found".format(net_type), WARNING)
if custom_hostname_link:
custom_cert = os.path.join(
ssl_dir,
'cert_{}'.format(custom_hostname_link))
custom_key = os.path.join(
ssl_dir,
'key_{}'.format(custom_hostname_link))
if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
os.symlink(hostname_cert, custom_cert)
os.symlink(hostname_key, custom_key)
def install_certs(ssl_dir, certs, chain=None):
"""Install the certs passed into the ssl dir and append the chain if
provided.
:param ssl_dir: str Directory to create symlinks in
:param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
:param chain: str Chain to be appended to certs
"""
for cn, bundle in certs.items():
cert_filename = 'cert_{}'.format(cn)
key_filename = 'key_{}'.format(cn)
cert_data = bundle['cert']
if chain:
# Append chain file so that clients that trust the root CA will
# trust certs signed by an intermediate in the chain
cert_data = cert_data + os.linesep + chain
write_file(
path=os.path.join(ssl_dir, cert_filename),
content=cert_data, perms=0o640)
write_file(
path=os.path.join(ssl_dir, key_filename),
content=bundle['key'], perms=0o640)
def process_certificates(service_name, relation_id, unit,
custom_hostname_link=None):
"""Process the certificates supplied down the relation
    :param service_name: str Name of service the certificates are for.
:param relation_id: str Relation id providing the certs
:param unit: str Unit providing the certs
:param custom_hostname_link: str Name of custom link to create
"""
data = relation_get(rid=relation_id, unit=unit)
ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
mkdir(path=ssl_dir)
name = local_unit().replace('/', '_')
certs = data.get('{}.processed_requests'.format(name))
chain = data.get('chain')
ca = data.get('ca')
if certs:
certs = json.loads(certs)
install_ca_cert(ca.encode())
install_certs(ssl_dir, certs, chain)
create_ip_cert_links(
ssl_dir,
custom_hostname_link=custom_hostname_link)
def get_requests_for_local_unit(relation_name=None):
"""Extract any certificates data targeted at this unit down relation_name.
:param relation_name: str Name of relation to check for data.
:returns: List of bundles of certificates.
:rtype: List of dicts
"""
local_name = local_unit().replace('/', '_')
raw_certs_key = '{}.processed_requests'.format(local_name)
relation_name = relation_name or 'certificates'
bundles = []
for rid in relation_ids(relation_name):
for unit in related_units(rid):
data = relation_get(rid=rid, unit=unit)
if data.get(raw_certs_key):
bundles.append({
'ca': data['ca'],
'chain': data.get('chain'),
'certs': json.loads(data[raw_certs_key])})
return bundles
def get_bundle_for_cn(cn, relation_name=None):
"""Extract certificates for the given cn.
:param cn: str Canonical Name on certificate.
:param relation_name: str Relation to check for certificates down.
:returns: Dictionary of certificate data,
:rtype: dict.
"""
entries = get_requests_for_local_unit(relation_name)
cert_bundle = {}
for entry in entries:
for _cn, bundle in entry['certs'].items():
if _cn == cn:
cert_bundle = {
'cert': bundle['cert'],
'key': bundle['key'],
'chain': entry['chain'],
'ca': entry['ca']}
break
if cert_bundle:
break
return cert_bundle
| 34.594203
| 78
| 0.626938
|
ad831b88aded7d99b000fe58454451be76c3d05a
| 7,578
|
py
|
Python
|
analyzeProbaDF.py
|
alexbovet/twitter_opinion_mining
|
e071fc0447072877518a14f2f8f59f0dd974167f
|
[
"BSD-3-Clause"
] | 6
|
2019-02-12T14:41:45.000Z
|
2021-12-06T14:48:30.000Z
|
analyzeProbaDF.py
|
alexbovet/twitter_opinion_mining
|
e071fc0447072877518a14f2f8f59f0dd974167f
|
[
"BSD-3-Clause"
] | null | null | null |
analyzeProbaDF.py
|
alexbovet/twitter_opinion_mining
|
e071fc0447072877518a14f2f8f59f0dd974167f
|
[
"BSD-3-Clause"
] | 2
|
2019-02-11T10:50:54.000Z
|
2022-03-07T09:13:35.000Z
|
# Author: Alexandre Bovet <alexandre.bovet@gmail.com>
# License: BSD 3 clause
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
from functools import partial
import time
def applyParallel(dfGrouped, func, ncpu):
with Pool(ncpu) as p:
ret_list = p.map(func, [group for name, group in dfGrouped])
return pd.concat(ret_list)
def run_from_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
if run_from_ipython():
from IPython.display import display
# aggregating functions: used on tweets grouped by day
def get_num_tweets(group, parallel=True):
""" returns the number of tweets in each camp in group """
# if there is no tweets for this day
if group.index.size == 0:
if parallel:
return pd.DataFrame()
else:
return pd.Series()
else:
data = {'n_pro_1': group.n_pro_1.sum(),
'n_pro_0': group.n_pro_0.sum()}
if parallel:
            # must return a DataFrame when running in parallel
return pd.DataFrame(data=data, index=[group.datetime_EST.iloc[0].date()])
else:
return pd.Series(data=data)
def get_pro_h_ratio(ggroup):
""" returns the ratio of tweets pro 1 in ggroup"""
return ggroup.n_pro_1.sum()/ggroup.n_pro_1.size
def get_num_users(group, r_threshold=0.5, parallel=True):
""" returns the number of users pro 1 in group with a ratio of
tweets pro of at least r_threshold
"""
if group.index.size == 0:
if parallel:
return pd.DataFrame()
else:
return pd.Series()
else:
        # group tweets per user
        g_users = group.groupby('user_id')
        # ratio of tweets classified pro camp 1 for each user
pro_h_ratio = g_users.apply(get_pro_h_ratio)
n_pro_1 = (pro_h_ratio > r_threshold).sum()
n_pro_0 = (pro_h_ratio < 1-r_threshold).sum()
n_null = pro_h_ratio.size - n_pro_1 - n_pro_0
data = {'n_pro_1': n_pro_1,
'n_pro_0': n_pro_0,
'null': n_null}
if parallel:
return pd.DataFrame(data=data, index=[group.datetime_EST.iloc[0].date()])
else:
return pd.Series(data=data)
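# ---------------------------------------------------------------------------
# Minimal illustration (not part of the original module) of how the
# aggregating functions above behave on a toy, single-day group of tweets.
# The column names follow what this module expects (user_id, n_pro_1,
# n_pro_0, datetime_EST); the values are invented.
def _demo_aggregators():
    toy = pd.DataFrame({
        'user_id': [1, 1, 2, 2, 3],
        'n_pro_1': [True, True, False, True, False],
        'n_pro_0': [False, False, True, False, True],
        'datetime_EST': pd.to_datetime(['2016-11-01'] * 5),
    })
    # user 1: 2/2 tweets in camp 1 -> counted in n_pro_1
    # user 2: 1/2 tweets in camp 1 -> undecided at r_threshold=0.5 -> 'null'
    # user 3: 0/1 tweets in camp 1 -> counted in n_pro_0
    print(get_num_tweets(toy, parallel=False))
    print(get_num_users(toy, r_threshold=0.5, parallel=False))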
from baseModule import baseModule
class analyzeProbaDF(baseModule):
""" Computes the number of tweets and the number of users in each camp per day.
Must be initialized with a dictionary `job` containing keys `df_proba_filename`,
`df_num_tweets_filename` and `df_num_users_filename`.
`analyzeProbaDF` reads `df_proba_filename` and returns the number of tweets
and the number of users in each camp per day. The results are displayed and
saved as pandas dataframes to `df_num_tweets_filename` and `df_num_users_filename`.
*Optional parameters:*
:ncpu: number of cores to use. Default is number of cores of the machine
minus one.
:resampling_frequency: frequency at which tweets are grouped.
Default is `'D'`, i.e. daily. (see [1] for
different possibilities.)
:threshold: threshold for the classifier probability (threshold >= 0.5).
Tweets with p > threshold are classified in camp2 and tweets with
p < 1-threshold are classified in camp1. Default is 0.5.
:r_threshold: threshold for the ratio of classified tweets needed to
classify a user. Default is 0.5.
[1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
"""
def run(self):
#==============================================================================
# PARAMETERS
#==============================================================================
df_proba_filename = self.job['df_proba_filename']
df_num_tweets_filename = self.job['df_num_tweets_filename']
df_num_users_filename = self.job['df_num_users_filename']
#==============================================================================
# OPTIONAL PARAMETERS
#==============================================================================
propa_col_name = self.job.get('propa_col_name', 'p_1')
ncpu = self.job.get('ncpu', cpu_count()-1)
resampling_frequency = self.job.get('resampling_frequency', 'D') # day
# threshold for the classifier probability
threshold = self.job.get('threshold',0.5)
# threshold for the ratio of classified tweets needed to classify a user
r_threshold = self.job.get('r_threshold',0.5)
if ncpu == 1:
parallel=False
else:
parallel=True
print('loading ' + df_proba_filename)
df = pd.read_pickle(df_proba_filename)
# display settings for pandas
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', None)
#% filter dataframe according to threshold
df_filt = df.drop(df.loc[np.all([df[propa_col_name] <= threshold, df[propa_col_name] >= 1-threshold], axis=0)].index)
df_filt['n_pro_1'] = df_filt[propa_col_name] > threshold
df_filt['n_pro_0'] = df_filt[propa_col_name] < 1 - threshold
# resample tweets per day
resample = df_filt.groupby(pd.Grouper(key='datetime_EST',freq=resampling_frequency))
print('threshold: ' + str(threshold))
print('r_threshold: ' + str(r_threshold))
        # prepare functions for parallel apply
get_num_tweets_u = partial(get_num_tweets,
parallel=parallel)
get_num_users_u = partial(get_num_users, r_threshold=r_threshold,
parallel=parallel)
print('computing stats')
t0 = time.time()
if parallel:
self.df_num_tweets = applyParallel(resample, get_num_tweets_u, ncpu)
self.df_num_users = applyParallel(resample, get_num_users_u, ncpu)
else:
self.df_num_tweets = resample.apply(get_num_tweets_u)
self.df_num_users = resample.apply(get_num_users_u)
#%% save dataframes
self.df_num_tweets.to_pickle(df_num_tweets_filename)
self.df_num_users.to_pickle(df_num_users_filename)
print('finished')
print(time.time() - t0)
self.string_results = "\nNumber of tweets per day in each camp:\n"+\
self.df_num_tweets.to_string() + \
"\nNumber of users per day in each camp:\n"+\
self.df_num_users.to_string()
if run_from_ipython():
print('\nNumber of tweets per day in each camp:')
display(self.df_num_tweets)
print('\nNumber of users per day in each camp:')
display(self.df_num_users)
else:
print('\nNumber of tweets per day in each camp:')
print(self.df_num_tweets.to_string())
print('\nNumber of users per day in each camp:')
            print(self.df_num_users.to_string())
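# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). It shows how a `job`
# dictionary is typically assembled and how the module is run serially; the
# pickle filenames are hypothetical placeholders and the constructor is
# assumed to take the dictionary, as the class docstring above implies.
if __name__ == '__main__':
    job = {
        'df_proba_filename': 'df_proba.pickle',            # hypothetical input
        'df_num_tweets_filename': 'df_num_tweets.pickle',  # hypothetical output
        'df_num_users_filename': 'df_num_users.pickle',    # hypothetical output
        'ncpu': 1,          # run serially for easier debugging
        'threshold': 0.5,
        'r_threshold': 0.5,
    }
    analyzeProbaDF(job).run()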
| 36.965854
| 125
| 0.562418
|
4437a9f444281696ce3dcec12b11bf80cdc2780d
| 6,329
|
py
|
Python
|
URSABench/models/imagenet_resnet.py
|
reml-lab/URSABench
|
f4cceffbf8ea91d95818867c35fead4e2b272aba
|
[
"MIT"
] | 10
|
2020-07-18T14:48:35.000Z
|
2022-01-18T11:21:27.000Z
|
URSABench/models/imagenet_resnet.py
|
reml-lab/URSABench
|
f4cceffbf8ea91d95818867c35fead4e2b272aba
|
[
"MIT"
] | null | null | null |
URSABench/models/imagenet_resnet.py
|
reml-lab/URSABench
|
f4cceffbf8ea91d95818867c35fead4e2b272aba
|
[
"MIT"
] | 4
|
2020-08-30T02:43:32.000Z
|
2021-03-18T03:37:52.000Z
|
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torchvision.transforms import transforms
__all__ = ['INResNet18', 'INResNet34', 'INResNet50', 'INResNet101',
'INResNet152', 'ResNet_dropout']
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.block = block
self.layers = layers
self.num_classes = num_classes
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResNet_dropout(nn.Module):
def __init__(self, block, layers, num_classes=10, dropout=0.2):
super(ResNet_dropout, self).__init__()
self.in_planes = 64
self.block = block
self.layers = layers
self.num_classes = num_classes
self.dropout = dropout
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(F.dropout(out, p=self.dropout))
return out
class Base:
base = ResNet
args = list()
kwargs = dict()
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize(32),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
class INResNet18(Base):
kwargs = {'block': BasicBlock, 'layers': [2, 2, 2, 2]}
class INResNet34(Base):
kwargs = {'block': BasicBlock, 'layers': [3, 4, 6, 3]}
class INResNet50(Base):
kwargs = {'block': Bottleneck, 'layers': [3, 4, 6, 3]}
class INResNet101(Base):
kwargs = {'block': Bottleneck, 'layers': [3, 4, 23, 3]}
class INResNet152(Base):
kwargs = {'block': Bottleneck, 'layers': [3, 8, 36, 3]}
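# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): building one of the
# configurations above and running a forward pass on a CIFAR-sized batch.
# `torch` is imported here only for the example; num_classes defaults to 10.
if __name__ == '__main__':
    import torch
    cfg = INResNet18
    model = cfg.base(*cfg.args, **cfg.kwargs)
    x = torch.randn(2, 3, 32, 32)   # matches the 32x32 transforms above
    logits = model(x)
    print(logits.shape)             # expected: torch.Size([2, 10])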
| 34.966851
| 104
| 0.614157
|
7bc191447a05945884dd23d54714c9b5b885d4dd
| 49,671
|
py
|
Python
|
build/python-env/lib/python2.7/site-packages/pip/_vendor/distlib/database.py
|
rfraposa/hadoopbeat
|
884c0f8e0f2027cb4aa180d8f419f05b4d890e85
|
[
"Apache-2.0"
] | 38
|
2017-02-28T05:39:40.000Z
|
2019-01-16T04:39:04.000Z
|
build/python-env/lib/python2.7/site-packages/pip/_vendor/distlib/database.py
|
rfraposa/hadoopbeat
|
884c0f8e0f2027cb4aa180d8f419f05b4d890e85
|
[
"Apache-2.0"
] | 24
|
2016-10-06T23:37:43.000Z
|
2017-02-18T21:36:37.000Z
|
build/python-env/lib/python2.7/site-packages/pip/_vendor/distlib/database.py
|
rfraposa/hadoopbeat
|
884c0f8e0f2027cb4aa180d8f419f05b4d890e85
|
[
"Apache-2.0"
] | 18
|
2016-09-30T17:44:05.000Z
|
2021-12-23T07:55:11.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if not version is None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
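# ---------------------------------------------------------------------------
# Usage sketch (not part of the vendored module): enumerating what is
# installed on sys.path and looking a single project up by name. The project
# name 'pip' is only an example and may not exist in every environment.
def _example_distribution_path():
    dist_path = DistributionPath(include_egg=True)
    for dist in dist_path.get_distributions():
        print(dist.name, dist.version)
    dist = dist_path.get_distribution('pip')
    if dist is not None:
        print('provides:', dist.provides)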
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
Return a textual representation of this instance,
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
hasher = getattr(hashlib, hasher)
prefix = '%s=' % self.hasher
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
            # fail loudly rather than dropping into the debugger
            raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
try:
r = finder.find('REQUESTED')
except AttributeError:
            # treat a finder without find() as "REQUESTED not present"
            r = None
self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
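# ---------------------------------------------------------------------------
# Usage sketch (not part of the vendored module): verifying an installed
# PEP 376 distribution against its RECORD file. The project name is only an
# example; check_installed_files() returns an empty list when nothing is
# amiss.
def _example_check_installed(name='pip'):
    dist = DistributionPath().get_distribution(name)
    if isinstance(dist, InstalledDistribution):
        for path, kind, expected, actual in dist.check_installed_files():
            print(path, kind, expected, actual)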
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if not label is None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
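# ---------------------------------------------------------------------------
# Usage sketch (not part of the vendored module): building the dependency
# graph for everything installed on sys.path and sorting it topologically.
def _example_dependency_graph():
    dists = list(DistributionPath().get_distributions())
    graph = make_graph(dists)
    ordered, cyclic = graph.topological_sort()
    print('install order:', ['%s %s' % (d.name, d.version) for d in ordered])
    if cyclic:
        print('unresolvable cycle among:', [d.name for d in cyclic])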
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
    md.summary = summary or 'Placeholder for summary'
return Distribution(md)
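# ---------------------------------------------------------------------------
# Usage sketch (not part of the vendored module): make_dist() gives a bare
# Distribution that can stand in for a real project, e.g. in tests.
def _example_make_dist():
    placeholder = make_dist('example-project', '1.0', summary='Example only')
    print(placeholder.name_and_version)   # typically 'example-project (1.0)'
    print(placeholder.provides)           # typically ['example-project (1.0)']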
| 37.83016
| 86
| 0.562703
|
1aebea49794d246238656bfc03b521d1b3ac1151
| 662
|
py
|
Python
|
examples/simple_syslog_example.py
|
Worvast/python-utils
|
1cb4bf9e73b589b1e543685cb0427f05e1731165
|
[
"MIT"
] | 4
|
2019-02-20T16:59:39.000Z
|
2020-04-08T02:04:58.000Z
|
examples/simple_syslog_example.py
|
Worvast/python-utils
|
1cb4bf9e73b589b1e543685cb0427f05e1731165
|
[
"MIT"
] | 29
|
2019-02-22T16:19:25.000Z
|
2022-03-31T13:02:14.000Z
|
examples/simple_syslog_example.py
|
Worvast/python-utils
|
1cb4bf9e73b589b1e543685cb0427f05e1731165
|
[
"MIT"
] | 2
|
2019-02-22T14:32:28.000Z
|
2020-03-19T15:46:06.000Z
|
from devoutils.faker import SyslogFakeGenerator
from devo.sender import Sender
if __name__ == "__main__":
with open("./simple_syslog_template.jinja2", 'r') as myfile:
template = myfile.read()
con = None
    # This example needs a Sender connection
# Example
# con = Sender(config="./config.yaml")
    # If you remove simulation or set it to False, data will be sent
f = SyslogFakeGenerator(engine=con,
template=template,
simulation=True,
probability=100,
frequency=(1, 1),
verbose=True)
f.start()
| 30.090909
| 65
| 0.546828
|
3396d85ba0be8afb9996249c9845c983898ffee8
| 312
|
py
|
Python
|
mover/gametest/persistence-sqlite.py
|
ismailqau/libxayagame
|
21aa17ab6d93d7bdf3882788243a2ae591d7e841
|
[
"MIT"
] | 16
|
2018-09-08T02:11:44.000Z
|
2022-02-07T00:13:42.000Z
|
mover/gametest/persistence-sqlite.py
|
ismailqau/libxayagame
|
21aa17ab6d93d7bdf3882788243a2ae591d7e841
|
[
"MIT"
] | 47
|
2018-09-04T11:19:27.000Z
|
2021-09-14T12:53:25.000Z
|
mover/gametest/persistence-sqlite.py
|
ismailqau/libxayagame
|
21aa17ab6d93d7bdf3882788243a2ae591d7e841
|
[
"MIT"
] | 13
|
2018-12-20T19:50:22.000Z
|
2022-03-14T21:47:04.000Z
|
#!/usr/bin/env python3
# Copyright (C) 2018-2020 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from persistence import PersistenceTest
if __name__ == "__main__":
PersistenceTest ("sqlite").main ()
| 31.2
| 69
| 0.766026
|
d2805130789e5d21e505ff389e4830e24dda7a43
| 1,735
|
py
|
Python
|
working-with-files.py
|
Marketionist/py-playground
|
8982ac4db4638a4e725da2adcc39fe132d62a65b
|
[
"MIT"
] | null | null | null |
working-with-files.py
|
Marketionist/py-playground
|
8982ac4db4638a4e725da2adcc39fe132d62a65b
|
[
"MIT"
] | null | null | null |
working-with-files.py
|
Marketionist/py-playground
|
8982ac4db4638a4e725da2adcc39fe132d62a65b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# python working-with-files.py
import os
import shutil
import glob
# Source file path
source = 'working-with-files.py'
# Destination file path
destination = source.replace('.py', '_copy.py')
# Print the absolute path to the current directory
print('Current directory absolute path:', os.getcwd())
# List files and directories in the current directory
print(f'Before copying file: {os.listdir()}')
# List all python files in the current directory
py_files = []
for file in glob.glob('*.py'):
py_files.append(file)
print(f'All .py files: {py_files}')
# Print the absolute path to the source file
print('Absolute path to the source file:', os.path.normpath(os.path.join(
os.getcwd(), source
)))
# Print file permission of the source
source_perm = oct(os.stat(source).st_mode)[-3:]
print('{file} file permission mode: {perm}\n'.format(
file=source, perm=source_perm)
)
# Copy the content of source to destination
try:
dest = shutil.copy(source, destination)
print('File copied successfully!\n')
# If source and destination are same
except shutil.SameFileError:
print('Source and destination represents the same file')
# If there is any permission issue
except PermissionError:
print('Permission denied')
# For other errors
except:
print('Error occurred while copying file')
# List files and directories in the current directory after copying
print(f'After copying file:\n {os.listdir()}')
# Print file permission of the destination
destination_perm = oct(os.stat(destination).st_mode)[-3:]
print('{file} file permission mode: {perm}'.format(
file=destination, perm=destination_perm)
)
# Print path of newly created file
print('Destination absolute path:', os.path.abspath(dest))
| 26.692308
| 73
| 0.732565
|
ec2116b59b16dc219ac09503a9e95c6abc26e062
| 1,961
|
py
|
Python
|
tests/patterns/test_match_path_patterns.py
|
hile/pathlib-tree
|
73fe3132472548ca2480b3ab3278dfaab252b149
|
[
"PSF-2.0"
] | 2
|
2022-01-11T14:50:24.000Z
|
2022-03-16T21:37:09.000Z
|
tests/patterns/test_match_path_patterns.py
|
hile/pathlib-tree
|
73fe3132472548ca2480b3ab3278dfaab252b149
|
[
"PSF-2.0"
] | null | null | null |
tests/patterns/test_match_path_patterns.py
|
hile/pathlib-tree
|
73fe3132472548ca2480b3ab3278dfaab252b149
|
[
"PSF-2.0"
] | null | null | null |
"""
Unit tests for pathlib_tree.patterns functions
"""
from pathlib_tree.patterns import match_path_patterns
def test_match_simple_patterns_no_match():
"""
Test simple relative tree pattern match case
"""
patterns = (
'*/*.txt',
)
assert not match_path_patterns(
patterns,
'/data',
'/test/other files/filename.txt'
)
def test_match_simple_patterns_direct_match():
"""
Test simple relative tree pattern match case
"""
patterns = (
'filename.txt',
'*/*.txt',
)
assert match_path_patterns(
patterns,
'/test',
'/test/other files/filename.txt'
)
patterns = (
        'test/other files/',
)
assert match_path_patterns(
patterns,
'/test/other files',
'/test/other files/filename.txt'
)
def test_match_simple_patterns():
"""
Test simple relative tree pattern match case
"""
patterns = (
'filename.wav',
'*/*.txt',
)
assert match_path_patterns(
patterns,
'/test',
'/test/other files/filename.txt'
)
def test_match_prefix_match():
"""
Test simple relative tree pattern match case
"""
patterns = (
'other files/*.txt',
)
assert match_path_patterns(
patterns,
'/test',
'/test/other files/filename.txt'
)
def test_match_prefix_glob_match():
"""
Test simple relative tree pattern match case
"""
patterns = (
'*/other files/*',
)
assert match_path_patterns(
patterns,
'/test',
'/test/data/other files/deeper/filename.txt'
)
def test_match_relative_pattern():
"""
Test matching a relative path pattern
"""
patterns = (
'other */*.wav',
'*/*.txt',
)
assert match_path_patterns(
patterns,
'/test/data',
'/test/data/other files/filename.txt'
)
| 19.415842
| 53
| 0.565018
|
dee5f6b70056ae69ca2c8e74471c4c81d3c7e15f
| 11,092
|
py
|
Python
|
app/main/routes_zymatic_api.py
|
branflakem3/picobrew_pico
|
bd6a06e459eef4c92bc0ca32f11c190fba292809
|
[
"MIT"
] | null | null | null |
app/main/routes_zymatic_api.py
|
branflakem3/picobrew_pico
|
bd6a06e459eef4c92bc0ca32f11c190fba292809
|
[
"MIT"
] | null | null | null |
app/main/routes_zymatic_api.py
|
branflakem3/picobrew_pico
|
bd6a06e459eef4c92bc0ca32f11c190fba292809
|
[
"MIT"
] | null | null | null |
import json
import uuid
import os
from datetime import datetime
from webargs import fields
from webargs.flaskparser import use_args, FlaskParser
from .. import socketio
from . import main
from .config import brew_active_sessions_path
from .model import MachineType, PicoBrewSession
from .routes_frontend import get_zymatic_recipes
from .session_parser import active_brew_sessions
arg_parser = FlaskParser()
events = {}
# usersetup: /API/usersetup?machine={}&admin=0
# Response: '\r\n#{0}/{1}|#' where {0} : Profile GUID, {1} = User Name
user_setup_args = {
'machine': fields.Str(required=True), # 12 character alpha-numeric Product ID
'admin': fields.Int(required=True), # Always 0
}
@main.route('/API/usersetup')
@use_args(user_setup_args, location='querystring')
def process_user_setup(args):
profile_guid = uuid.uuid4().hex[:32]
user_name = "DefaultUser" # TODO: Config parameter?
return '\r\n#{}/{}|#'.format(profile_guid, user_name)
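# ---------------------------------------------------------------------------
# Illustration (not part of the original blueprint): parsing the response
# format documented above, '\r\n#{guid}/{user}|#', the way a client of this
# endpoint would. Purely a sketch of the string format.
def _parse_user_setup_response(body):
    inner = body.strip().strip('#').rstrip('|')
    profile_guid, user_name = inner.split('/', 1)
    return profile_guid, user_name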
# firstSetup: /API/firstSetup?machine={}|1W_ADDR,1/1W_ADDR,2/1W_ADDR,3/1W_ADDR,4&admin=0
# Response: '\r\n'
first_setup_args = {
'machine': fields.Str(required=True), # 12 character alpha-numeric Product ID (1W_ADDR = 16 character alpha-numeric OneWire Address, i.e. 28b0123456789abc)
'admin': fields.Int(required=True), # Always 0
}
@main.route('/API/firstSetup')
@use_args(first_setup_args, location='querystring')
def process_first_setup(args):
return '\r\n'
# zymaticFirmwareCheck: /API/zymaticFirmwareCheck?machine={}&ver={}&maj={}&min={}
# Response: '\r\n#{0}#\r\n' where {0} : T/F if newer firmware available
zymatic_firmware_check_args = {
'machine': fields.Str(required=True), # 12 character alpha-numeric Product ID
'ver': fields.Int(required=True), # Int Version
'maj': fields.Int(required=True), # Int Major Version
'min': fields.Int(required=True), # Int Minor Version
}
@main.route('/API/zymaticFirmwareCheck')
@use_args(zymatic_firmware_check_args, location='querystring')
def process_zymatic_firmware_check(args):
uid = args['machine']
if uid not in active_brew_sessions:
active_brew_sessions[uid] = PicoBrewSession(MachineType.ZYMATIC)
return '\r\n#F#\r\n'
# SyncUser: /API/SyncUser?user={}&machine={}
# Response: '\r\n\r\n#{0}#' where {0} : Cleaning/Recipe List
sync_user_args = {
'user': fields.Str(required=True), # 32 character alpha-numeric Profile GUID
'machine': fields.Str(required=True), # 12 character alpha-numeric Product ID
}
@main.route('/API/SyncUser')
@main.route('/API/SyncUSer')
@use_args(sync_user_args, location='querystring')
def process_sync_user(args):
clean = False
if args['user'] == '00000000000000000000000000000000':
# New Clean V6
# -Make sure that all 3 mash screens are in place in the step filter. Do not insert the adjunct loaf/hop cages.
# -Place cleaning tablet in the right area of the adjunct compartment, on top of the smaller screen, at the end of the metal tab. 1/4 cup of powdered dishwasher detergent can also be used.
# -Add 1.5 gallons of HOT tap water to the keg at completion of cleaning cycle (prompt notes/before final step) -Empty and rinse keg, step filter, screens, and in-line filter.
# -Fill keg with 4 gallons of hot tap water
# -Connect black fitting to 'OUT' post on keg
# -Attach wand to grey fitting and run it into bucket or sink, OR attach grey fitting to 'IN' post on empty keg
# -Continue, this will rinse your system. There should be no water left in the keg at the end of this step.
# Share your experience with info@picobrew.com, Subject - New Clean Beta attn: Kevin, attach pictures of debris removed and collected on screens if possible.
clean = True
return '\r\n\r\n#{0}#'.format(get_zymatic_recipe_list(clean))
# checksync: /API/checksync?user={}
# Response: '\r\n#!#' or '\r\n#+#'
check_sync_args = {
'user': fields.Str(required=True), # 32 character alpha-numeric Profile GUID
}
@main.route('/API/checksync')
@use_args(check_sync_args, location='querystring')
def process_check_sync(args):
# Needs Sync '\r\n#+#'
# No Sync '\r\n#!#'
return '\r\n#!#'
# recoversession: /API/recoversession?session={}&code={}
# Response: '\r\n#{0}!#' where {0} = Recipe String or '\r\n#{0}#' where {0} = Recovery Step
recover_session_args = {
'session': fields.Str(required=True), # 32 character alpha-numeric session
'code': fields.Int(required=True), # 0 = Step 1, 1 = Step 2
}
@main.route('/API/recoversession')
@use_args(recover_session_args, location='querystring')
def process_recover_session(args):
session = args['session']
uid = get_machine_by_session(session)
if args['code'] == 0:
return '\r\n#{0}!#'.format(get_recipe_by_name(active_brew_sessions[uid].name))
else:
return '\r\n#{0}#'.format(active_brew_sessions[uid].recovery)
# sessionerror: /API/sessionerror?machine={}&session={}&errorcode={}
# Response: '\r\n'
session_error_args = {
'machine': fields.Str(required=True), # 12 character alpha-numeric Product ID
'session': fields.Str(required=True), # 32 character alpha-numeric session
'errorcode': fields.Int(required=True), # Int Error Code
}
@main.route('/API/sessionerror')
@use_args(session_error_args, location='querystring')
def process_session_error(args):
# TODO: What to do?
return '\r\n'
# logsession: /API/logSession?user={}&recipe={}&code={}&machine={}&firm={}
# /API/logsession?session={}&code=1&data={}&state={}
# /API/LogSession?session={}&data={}&code=2&step={}&state={}
# /API/logsession?session={}&code=3
# Response: '\r\n#{0}#' where {0} = Session or '\r\n'
log_session_args = {
'user': fields.Str(required=False), # 32 character alpha-numeric Profile GUID
'recipe': fields.Str(required=False), # 32 character alpha-numeric recipe
'code': fields.Int(required=True), # 0 = New Session, 1 = Event, 2 = Temperature Data, 3 = End Session
'machine': fields.Str(required=False), # 12 character alpha-numeric Product ID
'firm': fields.Str(required=False), # Current firmware version - i.e. 0.1.14
'session': fields.Str(required=False), # 32 character alpha-numeric session
'data': fields.Str(required=False), # Event Name / Temperature Data, HTTP Formatted (i.e. %20 ( ) or %2F = (/))
'state': fields.Int(required=False), # ?
'step': fields.Str(required=False), # 8 Integers separated by / for recovery
}
@main.route('/API/logsession')
@main.route('/API/logSession')
@main.route('/API/LogSession')
@use_args(log_session_args, location='querystring')
def process_log_session(args):
ret = '\r\n'
global events
if args['code'] == 0:
uid = args['machine']
if uid not in active_brew_sessions:
active_brew_sessions[uid] = PicoBrewSession(MachineType.ZYMATIC)
active_brew_sessions[uid].session = uuid.uuid4().hex[:32]
active_brew_sessions[uid].name = get_recipe_name_by_id(args['recipe'])
# replace spaces and '#' with other character sequences
encoded_recipe = active_brew_sessions[uid].name.replace(' ', '_').replace("#", "%23")
filename = '{0}#{1}#{2}#{3}.json'.format(datetime.now().strftime('%Y%m%d_%H%M%S'), uid, active_brew_sessions[uid].session, encoded_recipe)
active_brew_sessions[uid].filepath = brew_active_sessions_path().joinpath(filename)
active_brew_sessions[uid].file = open(active_brew_sessions[uid].filepath, 'w')
active_brew_sessions[uid].file.write('[')
active_brew_sessions[uid].file.flush()
ret = '\r\n#{0}#'.format(active_brew_sessions[uid].session)
elif args['code'] == 1:
session = args['session']
if session not in events:
events[session] = []
events[session].append(args['data'])
uid = get_machine_by_session(session)
active_brew_sessions[uid].step = args['data']
elif args['code'] == 2:
session = args['session']
uid = get_machine_by_session(session)
temps = [int(temp[2:]) for temp in args['data'].split('|')]
session_data = {'time': ((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds() * 1000),
'wort': temps[0],
'heat1': temps[1],
'board': temps[2],
'heat2': temps[3],
'step': active_brew_sessions[uid].step,
'recovery': args['step'],
'state': args['state'],
}
event = None
if session in events and len(events[session]) > 0:
if len(events[session]) > 1:
print('DEBUG: Zymatic events > 1 - size = {}'.format(len(events[session])))
event = events[session].pop(0)
session_data.update({'event': event})
active_brew_sessions[uid].data.append(session_data)
active_brew_sessions[uid].recovery = args['step']
graph_update = json.dumps({'time': session_data['time'],
'data': temps,
'session': active_brew_sessions[uid].name,
'step': active_brew_sessions[uid].step,
'event': event,
})
socketio.emit('brew_session_update|{}'.format(uid), graph_update)
active_brew_sessions[uid].file.write('\n\t{},'.format(json.dumps(session_data)))
active_brew_sessions[uid].file.flush()
else:
session = args['session']
uid = get_machine_by_session(session)
active_brew_sessions[uid].file.seek(0, os.SEEK_END)
active_brew_sessions[uid].file.seek(active_brew_sessions[uid].file.tell() - 1, os.SEEK_SET) # Remove trailing , from last data set
active_brew_sessions[uid].file.write('\n]\n')
active_brew_sessions[uid].cleanup()
return ret
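# Sketch of the code=2 temperature payload, inferred from the parsing in
# process_log_session above: four '|'-separated fields, each a two-character label
# followed by an integer, ordered wort/heat1/board/heat2. The 'wt'/'h1'/'bd'/'h2'
# labels are hypothetical placeholders, not confirmed firmware strings.
def _encode_temps(wort, heat1, board, heat2):
    return 'wt{}|h1{}|bd{}|h2{}'.format(wort, heat1, board, heat2)
def _decode_temps(data):
    return [int(field[2:]) for field in data.split('|')]
# _decode_temps(_encode_temps(65, 120, 85, 120)) -> [65, 120, 85, 120]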
# -------- Utility --------
def get_zymatic_recipe_list(clean):
recipe_list_clean = ''
recipe_list_brew = ''
for r in get_zymatic_recipes(False):
if r.clean:
recipe_list_clean += r.serialize()
else:
recipe_list_brew += r.serialize()
return recipe_list_clean if clean else recipe_list_brew
def get_recipe_name_by_id(recipe_id):
recipe = next((r for r in get_zymatic_recipes(False) if r.id == recipe_id), None)
return 'Invalid Recipe' if not recipe else recipe.name
def get_recipe_by_name(recipe_name):
recipe = next((r for r in get_zymatic_recipes(False) if r.name == recipe_name), None)
return '' if not recipe else recipe.serialize()
def get_machine_by_session(session):
return next((uid for uid in active_brew_sessions if active_brew_sessions[uid].session == session), None)
| 42.992248
| 196
| 0.642896
|
a3c2668a5af0a6e1d9251203f86d2330914e18ad
| 1,471
|
py
|
Python
|
2020/day19/solve.py
|
shiftinv/adventofcode
|
650fa2334c0d82a547b04e0f9a417fb359bc557c
|
[
"MIT"
] | null | null | null |
2020/day19/solve.py
|
shiftinv/adventofcode
|
650fa2334c0d82a547b04e0f9a417fb359bc557c
|
[
"MIT"
] | null | null | null |
2020/day19/solve.py
|
shiftinv/adventofcode
|
650fa2334c0d82a547b04e0f9a417fb359bc557c
|
[
"MIT"
] | null | null | null |
import re
import functools
def parse_input(lines):
split = lines.index('')
rules = {}
for line in lines[:split]:
n, r = line.split(': ')
if r.startswith('"'):
d = r[1:-1]
else:
d = [list(map(int, a.split())) for a in r.split('|')]
rules[int(n)] = d
return rules, lines[split + 1:]
@functools.lru_cache(maxsize=None)
def build_regex(rule_num, part):
r = rules[rule_num]
if isinstance(r, str):
return r
if part == 2 and rule_num in [8, 11]:
if rule_num == 8:
res = f'(?:{build_regex(42, 2)})+'
elif rule_num == 11:
res = '(?:' + '|'.join(
''.join(build_regex(42, 2) * n) + ''.join(build_regex(31, 2) * n)
for n in range(1, 20) # arbitrary upper limit of repeats, kinda hacky
) + ')'
else:
res = '(?:' + '|'.join(
''.join(build_regex(n, part) for n in a)
for a in r
) + ')'
return res
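# Toy illustration (not used by the solver): the same bounded-repetition trick that
# build_regex applies to part 2's looping rule 11, with rule 42 standing in for the
# literal 'a' and rule 31 for 'b'. Real inputs substitute the regexes built above.
def _toy_rule11_regex(limit=5):
    return '(?:' + '|'.join('a' * n + 'b' * n for n in range(1, limit + 1)) + ')'
# _toy_rule11_regex(3) -> '(?:ab|aabb|aaabbb)'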
with open('input.txt', 'r') as f:
lines = f.read().splitlines()
rules, messages = parse_input(lines)
# part 1
rule_0_re = re.compile(f'^{build_regex(0, 1)}$')
print(sum(rule_0_re.match(msg) is not None for msg in messages))
# part 2
# just your average 405k char regex, no big deal
# only takes 1.3s though, that's pretty impressive
rule_0_re = re.compile(f'^{build_regex(0, 2)}$')
print(sum(rule_0_re.match(msg) is not None for msg in messages))
| 26.745455
| 86
| 0.545207
|
616337762a4dde86b96ac55d0ba054bcc732f58e
| 33,798
|
py
|
Python
|
BioClients/chembl/Utils.py
|
jeremyjyang/BioClients
|
b78ab2b948c79616fed080112e31d383346bec58
|
[
"CC0-1.0"
] | 10
|
2020-05-26T07:29:14.000Z
|
2021-12-06T21:33:40.000Z
|
BioClients/chembl/Utils.py
|
jeremyjyang/BioClients
|
b78ab2b948c79616fed080112e31d383346bec58
|
[
"CC0-1.0"
] | 1
|
2021-10-05T12:25:30.000Z
|
2021-10-05T17:05:56.000Z
|
BioClients/chembl/Utils.py
|
jeremyjyang/BioClients
|
b78ab2b948c79616fed080112e31d383346bec58
|
[
"CC0-1.0"
] | 2
|
2021-03-16T03:20:24.000Z
|
2021-08-08T20:17:10.000Z
|
#!/usr/bin/env python3
"""
Utility functions for ChEMBL REST API.
https://chembl.gitbook.io/chembl-interface-documentation/web-services/chembl-data-web-services
http://chembl.blogspot.com/2015/02/using-new-chembl-web-services.html
"""
###
import sys,os,re,json,time,urllib.parse,logging,tqdm
import pandas as pd
#
from ..util import rest
#
NCHUNK=100
#
API_HOST="www.ebi.ac.uk"
API_BASE_PATH="/chembl/api/data"
BASE_URL="https://"+API_HOST+API_BASE_PATH
#
##############################################################################
def Status(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(f"{base_url}/status.json", parse_json=True)
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
  df = pd.DataFrame({tag:[(rval[tag] if tag in rval else "")] for tag in rval.keys()})  # wrap scalars in lists so pandas accepts the single-row frame
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def GetTargetByUniprot(ids, base_url=BASE_URL, fout=None):
n_out=0;
ids_chembl = set()
fout.write("UniprotId\ttarget_chembl_id\n")
for uniprot in ids:
id_chembl=None
rval = rest.Utils.GetURL(f"{base_url}/target.json?target_components__accession={uniprot}", parse_json=True)
targets = rval["targets"] if "targets" in rval else []
for target in targets:
id_chembl = target["target_chembl_id"]
ids_chembl.add(id_chembl)
fout.write(f"{uniprot}\t\{id_chembl}\n")
n_out+=1
if len(ids_chembl)>1:
logging.info(f"Uniprot ambiguous: {uniprot}")
for id_chembl in list(ids_chembl):
logging.debug(f"Uniprot: {uniprot} -> ChEMBL: {id_chembl}")
logging.info(f"n_out: {n_out}")
#############################################################################
def GetActivity(ids, resource, pmin, skip=0, nmax=None, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''Get activity data and necessary references only, due to size concerns. resource = assay|target|molecule. Filter on pChEMBL value, standardized negative log molar half-max response activity.'''
n_act=0; n_out=0; n_pval=0; n_pval_ok=0; tags=None; tq=None;
for i,id_this in enumerate(ids):
if i<skip: continue
if not tq: tq = tqdm.tqdm(total=len(ids)-skip, unit=resource+"s")
tq.update()
url_next = (f"{api_base_path}/activity.json?{resource}_chembl_id={id_this}&limit={NCHUNK}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if rval is None: break
acts = rval["activities"] if "activities" in rval else []
for act in acts:
logging.debug(json.dumps(act, sort_keys=True, indent=2))
n_act+=1
if not tags:
tags = list(act.keys())
for tag in tags[:]:
if type(act[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(act[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(act[tag]) if tag in act else '') for tag in tags]
if pmin is not None:
try:
pval = float(act["pchembl_value"])
n_pval+=1
if pval >= pmin:
n_pval_ok+=1
fout.write('\t'.join(vals)+'\n')
n_out+=1
logging.debug(f"[{n_act}] pVal ok ({pval:4.1f} >= {pmin:4.1f})")
else:
logging.debug(f"[{n_act}] pVal low ({pval:4.1f} < {pmin:4.1f})")
except:
logging.debug(f"[{n_act}] pVal missing.")
else:
fout.write('\t'.join(vals)+'\n')
n_out+=1
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if nmax and i>=(nmax-skip): break
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_act: {n_act}; n_out: {n_out}")
if pmin is not None:
logging.info(f"n_pval: {n_pval}; n_pval_ok: {n_pval_ok}; pVals missing: {n_act-n_pval}")
#############################################################################
def GetActivityProperties(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
n_out=0; tags=None;
for i,id_this in enumerate(ids):
if i<skip: continue
act = rest.Utils.GetURL((f"{base_url}/activity/{id_this}.json"), parse_json=True)
assay_chembl_id = act["assay_chembl_id"] if "assay_chembl_id" in act else ""
molecule_chembl_id = act["molecule_chembl_id"] if "molecule_chembl_id" in act else ""
props = act["activity_properties"] if "activity_properties" in act else []
for prop in props:
if not tags:
tags = list(prop.keys())
fout.write('\t'.join(["activity_id", "assay_chembl_id", "molecule_chembl_id"]+tags)+"\n")
logging.debug(json.dumps(prop, sort_keys=True, indent=2))
vals = [str(prop[tag]) if tag in prop else "" for tag in tags]
fout.write(('\t'.join([id_this, assay_chembl_id, molecule_chembl_id]+vals))+'\n')
n_out+=1
if nmax and i>=(nmax-skip): break
logging.info(f"n_qry: {len(ids)}; n_out: {n_out}")
#############################################################################
def ListTargets(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''One row per target. Ignore synonyms. If one-component, single protein, include UniProt accession.'''
n_tgt=0; n_cmt=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/target.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
tgts = rval["targets"] if rval and "targets" in rval else []
for tgt in tgts:
logging.debug(json.dumps(tgt, indent=2))
n_tgt+=1
if not tags:
tags = sorted(tgt.keys())
for tag in tags[:]:
if type(tgt[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(tgt[tag])}): "{tag}"')
tags.extend(["component_count", "accession"])
fout.write('\t'.join(tags)+'\n')
vals = [str(tgt[tag]) if tag in tgt else "" for tag in tags]
if "target_components" in tgt and tgt["target_components"]:
cmts = tgt["target_components"]
n_cmt+=len(cmts)
vals.append(f"{len(cmts)}")
vals.append(cmts[0]["accession"] if len(cmts)==1 else "")
else:
logging.debug(f"no-component target: {vals[0]}")
vals.extend(["", ""])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="tgts")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_targets: {n_tgt}; n_target_components: {n_cmt}; n_out: {n_out}")
#############################################################################
def GetTarget(ids, base_url=BASE_URL, fout=None):
'''One row per target. Ignore synonyms. If one-component, single protein, include UniProt accession.'''
n_tgt=0; n_cmt=0; n_out=0; tags=None; tq=None;
for id_this in ids:
tgt = rest.Utils.GetURL(f"{base_url}/target/{id_this}.json", parse_json=True)
if not tgt:
logging.error(f'Not found: "{id_this}"')
continue
n_tgt+=1
if not tq: tq = tqdm.tqdm(total=len(ids), unit="tgts")
tq.update()
if n_tgt==1 or not tags:
tags = sorted(tgt.keys())
for tag in tags[:]:
if type(tgt[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(tgt[tag])}): "{tag}"')
tags.extend(["component_count", "accession"])
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(tgt,sort_keys=True,indent=2))
vals = [str(tgt[tag]) if tag in tgt else "" for tag in tags]
if "target_components" in tgt and tgt["target_components"]:
cmts = tgt["target_components"]
n_cmt+=len(cmts)
vals.append(f"{len(cmts)}")
vals.append(str(cmts[0]["accession"]) if len(cmts)==1 else "")
else:
logging.debug(f"no-component target: {vals[0]}")
vals.extend(["", ""])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_targets: {n_tgt}; n_target_components: {n_cmt}; n_out: {n_out}")
#############################################################################
def GetTargetComponents(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
n_tgt=0; n_out=0; tags=[]; cmt_tags=[]; df=None; tq=None;
for i,id_this in enumerate(ids):
if i<skip: continue
if not tq: tq = tqdm.tqdm(total=len(ids)-skip, unit="tgts")
tq.update()
tgt = rest.Utils.GetURL(f"{base_url}/target/{id_this}.json", parse_json=True)
if not tgt: continue
n_tgt+=1
vals = [str(tgt[tag]) if tag in tgt else "" for tag in tags]
cmts = tgt["target_components"] if "target_components" in tgt and tgt["target_components"] else []
if not cmts: continue
for cmt in cmts:
logging.debug(json.dumps(cmt, indent=2))
if not tags:
for tag in tgt.keys():
if type(tgt[tag]) not in (dict, list, tuple):
tags.append(tag)
for tag in cmt.keys():
if type(cmt[tag]) not in (dict, list, tuple):
cmt_tags.append(tag)
df_this = pd.concat([
pd.DataFrame({tag:[(tgt[tag] if tag in tgt else None)] for tag in tags}),
pd.DataFrame({tag:[(cmt[tag] if tag in cmt else None)] for tag in cmt_tags})],
axis=1)
if fout is None:
df = pd.concat([df, df_this])
else:
df_this.to_csv(fout, "\t", index=False)
n_out+=df_this.shape[0]
if nmax and i>=(nmax-skip): break
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_targets: {n_tgt}; n_out: {n_out}")
if fout is None: return df
#############################################################################
def GetDocument(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
n_pmid=0; n_doi=0; n_out=0; tags=None; tq=None;
for i,id_this in enumerate(ids):
if i<skip: continue
if not tq: tq = tqdm.tqdm(total=len(ids)-skip, unit="docs")
tq.update()
doc = rest.Utils.GetURL(f"{base_url}/document/{id_this}.json", parse_json=True)
if not doc:
logging.error(f'Not found: "{id_this}"')
continue
if not tags:
tags = list(doc.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(doc, sort_keys=True, indent=2))
if "pubmed_id" in tags and doc["pubmed_id"]: n_pmid+=1
if "doi" in tags and doc["doi"]: n_doi+=1
vals = [str(doc[tag]) if tag in doc else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_pmid: {n_pmid}; n_doi: {n_doi}; n_out: {n_out}")
#############################################################################
def ListSources(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None;
url_next = (api_base_path+"/source.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
sources = rval["sources"] if "sources" in rval else []
for source in sources:
if not tags:
tags = list(source.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(source, sort_keys=True, indent=2))
vals = [str(source[tag]) if tag in source else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}")
#############################################################################
def ListCellLines(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_clo=0; n_efo=0; n_out=0; tags=None;
url_next = (api_base_path+"/cell_line.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
cells = rval["cell_lines"] if "cell_lines" in rval else []
for cell in cells:
if not tags:
tags = list(cell.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(cell, sort_keys=True, indent=2))
if "clo_id" in cell and cell["clo_id"]: n_clo+=1
if "efo_id" in cell and cell["efo_id"]: n_efo+=1
vals = [str(cell[tag]) if tag in cell else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}; n_clo: {n_clo}; n_efo: {n_efo}")
#############################################################################
def ListOrganisms(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None;
url_next = (api_base_path+"/organism.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
orgs = rval["organisms"] if "organisms" in rval else []
for org in orgs:
if not tags:
tags = list(org.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(org, sort_keys=True, indent=2))
vals = [str(org[tag]) if tag in org else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}")
#############################################################################
def ListProteinClasses(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None;
url_next = (api_base_path+"/protein_class.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
pcls = rval["protein_classes"] if "protein_classes" in rval else []
for pcl in pcls:
if not tags:
tags = list(pcl.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(pcl, sort_keys=True, indent=2))
vals = [str(pcl[tag]) if tag in pcl else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}")
#############################################################################
def ListDrugIndications(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_efo=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/drug_indication.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
dins = rval["drug_indications"] if "drug_indications" in rval else []
for din in dins:
if not tags:
tags = list(din.keys())
for tag in tags[:]:
if type(din[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(din[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(din, sort_keys=True, indent=2))
if "efo_id" in din and din["efo_id"]: n_efo+=1
vals = [str(din[tag]) if tag in din else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="inds")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}; n_efo: {n_efo}")
#############################################################################
def ListTissues(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_bto=0; n_efo=0; n_caloha=0; n_uberon=0; n_out=0; tags=None; tq=None;
url_next = (api_base_path+"/tissue.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
tissues = rval["tissues"] if "tissues" in rval else []
for tissue in tissues:
if not tags:
tags = list(tissue.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(tissue, sort_keys=True, indent=2))
if "bto_id" in tissue and tissue["bto_id"]: n_bto+=1
if "efo_id" in tissue and tissue["efo_id"]: n_efo+=1
if "uberon_id" in tissue and tissue["uberon_id"]: n_uberon+=1
if "caloha_id" in tissue and tissue["caloha_id"]: n_caloha+=1
vals = [str(tissue[tag]) if tag in tissue else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="tissues")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}; n_bto: {n_bto}; n_efo: {n_efo}; n_caloha: {n_caloha}; n_uberon: {n_uberon}")
#############################################################################
def ListMechanisms(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None; tq=None;
url_next = (api_base_path+"/mechanism.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
mechs = rval["mechanisms"] if "mechanisms" in rval else []
for mech in mechs:
if not tags:
tags = list(mech.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(mech, sort_keys=True, indent=2))
vals = [str(mech[tag]) if tag in mech else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="mechs")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
#############################################################################
def ListDocuments(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_pmid=0; n_doi=0; n_out=0; n_err=0; tags=None; tq=None;
url_next = (f"{api_base_path}/document.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
docs = rval["documents"] if "documents" in rval else []
for doc in docs:
if not tags:
tags = list(doc.keys())
if "abstract" in tags: tags.remove("abstract") #unnecessary, verbose
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(doc, sort_keys=True, indent=2))
if "pubmed_id" in tags and doc["pubmed_id"]: n_pmid+=1
if "doi" in tags and doc["doi"]: n_doi+=1
vals = [str(doc[tag]) if tag in doc else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="docs")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}; n_pmid: {n_pmid}; n_doi: {n_doi}")
#############################################################################
def GetAssay(ids, base_url=BASE_URL, fout=None):
n_out=0; tags=None;
for id_this in ids:
assay = rest.Utils.GetURL(f"{base_url}/assay/{id_this}.json", parse_json=True)
if not assay:
continue
if not tags:
tags = list(assay.keys())
for tag in tags[:]:
if type(assay[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(assay[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(assay[tag]) if tag in assay else "") for tag in tags]
fout.write('\t'.join(vals)+'\n')
n_out+=1
logging.info(f"n_in: {len(ids)}; n_out: {n_out}")
#############################################################################
def ListAssays(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_ass=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/assay.json?offset={skip}&limit={NCHUNK}")
t0 = time.time()
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
assays = rval["assays"] if "assays" in rval else []
for assay in assays:
n_ass+=1
if not tags:
tags = list(assay.keys())
for tag in tags[:]:
if type(assay[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(assay[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(assay[tag]).replace('\t', " ") if tag in assay else "") for tag in tags]
fout.write('\t'.join(vals)+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="assays")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
logging.info(f"""Elapsed time: {time.strftime('%Hh:%Mm:%Ss', time.gmtime(time.time()-t0))}""")
#############################################################################
def SearchAssays(asrc, atype, skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''Select assays based on source and optionally type.'''
n_ass=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/assay.json?offset={skip}&limit={NCHUNK}")
if asrc: url_next+=(f"&src_id={asrc}")
if atype: url_next+=(f"&assay_type={atype}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
assays = rval["assays"] if "assays" in rval else []
for assay in assays:
n_ass+=1
if not tags:
tags = list(assay.keys())
for tag in tags[:]:
if type(assay[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(assay[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(assay[tag]) if tag in assay else "") for tag in tags]
fout.write('\t'.join(vals)+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="assays")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_assay: {n_ass}; n_out: {n_out}")
##############################################################################
def GetMolecule(ids, base_url=BASE_URL, fout=None):
'''Ignore molecule_synonyms.'''
n_out=0; tags=None; struct_tags=None; prop_tags=None; tq=None;
for id_this in ids:
mol = rest.Utils.GetURL(f"{base_url}/molecule/{id_this}.json", parse_json=True)
if not mol: continue
if not tq: tq = tqdm.tqdm(total=len(ids), unit="mols")
tq.update()
if not tags:
tags = sorted(list(mol.keys()))
for tag in tags[:]:
if type(mol[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(mol[tag])}): "{tag}"')
struct_tags = sorted(mol["molecule_structures"].keys())
struct_tags.remove("molfile")
prop_tags = sorted(mol["molecule_properties"].keys())
      fout.write('\t'.join(["parent_chembl_id"]+tags+struct_tags+prop_tags)+'\n')  # parent ID leads the header to match the value order below
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
vals = [(mol["molecule_hierarchy"]["parent_chembl_id"] if "molecule_hierarchy" in mol and "parent_chembl_id" in mol["molecule_hierarchy"] else "")]
vals.extend([(str(mol[tag]) if tag in mol else "") for tag in tags])
vals.extend([(str(mol["molecule_structures"][tag]) if "molecule_structures" in mol and tag in mol["molecule_structures"] else "") for tag in struct_tags])
vals.extend([(str(mol["molecule_properties"][tag]) if "molecule_properties" in mol and tag in mol["molecule_properties"] else "") for tag in prop_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_in: {len(ids)}; n_out: {n_out}")
#############################################################################
def ListMolecules(dev_phase, skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''Ignore synonyms here.'''
n_mol=0; n_out=0; n_err=0; tags=None; struct_tags=None; prop_tags=None; tq=None;
url_next=(f"{api_base_path}/molecule.json?limit={NCHUNK}")
if skip: url_next+=(f"&offset={skip}")
if dev_phase: url_next+=(f"&max_phase={dev_phase}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
mols = rval["molecules"] if "molecules" in rval else []
for mol in mols:
n_mol+=1
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
if not tags:
tags = sorted(mol.keys())
for tag in tags[:]:
if type(mol[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(mol[tag])}): "{tag}"')
struct_tags = sorted(mol["molecule_structures"].keys())
struct_tags.remove("molfile")
prop_tags = sorted(mol["molecule_properties"].keys())
        fout.write('\t'.join(["parent_chembl_id"]+tags+struct_tags+prop_tags)+'\n')  # parent ID leads the header to match the value order below
vals = [(mol["molecule_hierarchy"]["parent_chembl_id"] if "molecule_hierarchy" in mol and mol["molecule_hierarchy"] and "parent_chembl_id" in mol["molecule_hierarchy"] else "")]
vals.extend([(str(mol[tag]) if tag in mol else "") for tag in tags])
vals.extend([(str(mol["molecule_structures"][tag]) if "molecule_structures" in mol and mol["molecule_structures"] and tag in mol["molecule_structures"] else "") for tag in struct_tags])
vals.extend([(str(mol["molecule_properties"][tag]) if "molecule_properties" in mol and mol["molecule_properties"] and tag in mol["molecule_properties"] else "") for tag in prop_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if nmax and n_mol>=nmax: break
if nmax and n_mol>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="mols")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
#############################################################################
def ListDrugs(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_mol=0; n_out=0; n_err=0; tags=None; struct_tags=None; prop_tags=None; tq=None;
url_next = (f"{api_base_path}/drug.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
mols = rval["drugs"] if "drugs" in rval else []
for mol in mols:
n_mol+=1
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
if not tags:
tags = sorted(mol.keys())
for tag in tags[:]:
if type(mol[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(mol[tag])}): "{tag}"')
struct_tags = sorted(mol["molecule_structures"].keys())
struct_tags.remove("molfile")
prop_tags = sorted(mol["molecule_properties"].keys())
        fout.write('\t'.join(["parent_chembl_id"]+tags+struct_tags+prop_tags)+'\n')  # parent ID leads the header to match the value order below
vals = [(mol["molecule_hierarchy"]["parent_chembl_id"] if "molecule_hierarchy" in mol and mol["molecule_hierarchy"] and "parent_chembl_id" in mol["molecule_hierarchy"] else "")]
vals.extend([(str(mol[tag]) if tag in mol else "") for tag in tags])
vals.extend([(str(mol["molecule_structures"][tag]) if "molecule_structures" in mol and mol["molecule_structures"] and tag in mol["molecule_structures"] else "") for tag in struct_tags])
vals.extend([(str(mol["molecule_properties"][tag]) if "molecule_properties" in mol and mol["molecule_properties"] and tag in mol["molecule_properties"] else "") for tag in prop_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_mol>=nmax: break
if nmax and n_mol>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="drugs")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
##############################################################################
def SearchMoleculeByName(ids, base_url=BASE_URL, fout=None):
"""IDs should be names/synonyms."""
n_out=0; n_notfound=0; synonym_tags=None;
tags = ["molecule_chembl_id"]
for id_this in ids:
rval = rest.Utils.GetURL(f"{base_url}/molecule/search?q={urllib.parse.quote(id_this)}", headers={"Accept":"application/json"}, parse_json=True)
if not rval:
logging.info(f'Not found: "{id_this}"')
n_notfound+=1
continue
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
mols = rval["molecules"] if "molecules" in rval else []
for mol in mols:
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
synonyms = mol["molecule_synonyms"] if "molecule_synonyms" in mol else []
for synonym in synonyms:
if not synonym_tags:
synonym_tags = list(synonym.keys())
fout.write('\t'.join(tags+synonym_tags)+'\n')
molecule_synonym = synonym["molecule_synonym"] if "molecule_synonym" in synonym else ""
if not re.search(id_this, molecule_synonym, re.I):
continue
vals = [(mol["molecule_chembl_id"] if "molecule_chembl_id" in mol else "")]
vals.extend([(str(synonym[tag]) if tag in synonym else "") for tag in synonym_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
logging.info(f"n_in: {len(ids)}; n_found: {len(ids)-n_notfound}; n_out: {n_out}")
#############################################################################
def GetMoleculeByInchikey(ids, base_url=BASE_URL, fout=None):
"""Requires InChI key, e.g. "GHBOEFUAGSHXPO-XZOTUCIWSA-N"."""
n_out=0; tags=[]; struct_tags=[]; df=None; tq=None;
for id_this in ids:
if not tq: tq = tqdm.tqdm(total=len(ids), unit="mols")
tq.update()
mol = rest.Utils.GetURL(f"{base_url}/molecule/{id_this}.json", parse_json=True)
if not mol:
continue
struct = mol["molecule_structures"] if "molecule_structures" in mol else None
if not struct: continue
if not tags:
for tag in mol.keys():
if type(mol[tag]) not in (list,dict): tags.append(tag)
for tag in struct.keys():
if type(struct[tag]) not in (list,dict): struct_tags.append(tag)
struct_tags.remove("molfile")
df_this = pd.concat([
pd.DataFrame({tag:[(mol[tag] if tag in mol else None)] for tag in tags}),
pd.DataFrame({tag:[(struct[tag] if tag in struct else None)] for tag in struct_tags})],
axis=1)
if fout is None:
df = pd.concat([df, df_this])
else:
df_this.to_csv(fout, "\t", index=False)
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_out: {n_out}; n_not_found: {len(ids)-n_out}")
if fout is None: return df
#############################################################################
| 48.145299
| 199
| 0.606574
|
48a763ca25144e0b8da2e5a6c9e945ea3040660a
| 9,364
|
py
|
Python
|
tests/integrations/test_torch.py
|
borisgrafx/client
|
c079f7816947a3092b500751eb920fda3866985f
|
[
"MIT"
] | null | null | null |
tests/integrations/test_torch.py
|
borisgrafx/client
|
c079f7816947a3092b500751eb920fda3866985f
|
[
"MIT"
] | null | null | null |
tests/integrations/test_torch.py
|
borisgrafx/client
|
c079f7816947a3092b500751eb920fda3866985f
|
[
"MIT"
] | null | null | null |
import wandb
import pytest
import sys
if sys.version_info >= (3, 9):
pytest.importorskip("pytorch", reason="pytorch doesnt support py3.9 yet")
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError:
class nn:
Module = object
pytestmark = pytest.mark.skipif(
sys.version_info < (3, 5), reason="PyTorch no longer supports py2"
)
def dummy_torch_tensor(size, requires_grad=True):
return torch.ones(size, requires_grad=requires_grad)
class DynamicModule(nn.Module):
def __init__(self):
super(DynamicModule, self).__init__()
self.choices = nn.ModuleDict(
{"conv": nn.Conv2d(10, 10, 3), "pool": nn.MaxPool2d(3)}
)
self.activations = nn.ModuleDict(
[["lrelu", nn.LeakyReLU()], ["prelu", nn.PReLU()]]
)
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
class EmbModel(nn.Module):
def __init__(self, x=16, y=32):
super().__init__()
self.emb1 = nn.Embedding(x, y)
self.emb2 = nn.Embedding(x, y)
def forward(self, x):
return {"key": {"emb1": self.emb1(x), "emb2": self.emb2(x),}}
class EmbModelWrapper(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.emb = EmbModel(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.emb(*args, **kwargs)
class Discrete(nn.Module):
def __init__(self):
super(Discrete, self).__init__()
def forward(self, x):
return nn.functional.softmax(x, dim=0)
class DiscreteModel(nn.Module):
def __init__(self, num_outputs=2):
super(DiscreteModel, self).__init__()
self.linear1 = nn.Linear(1, 10)
self.linear2 = nn.Linear(10, num_outputs)
self.dist = Discrete()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return self.dist(x)
class ParameterModule(nn.Module):
def __init__(self):
super(ParameterModule, self).__init__()
self.params = nn.ParameterList(
[nn.Parameter(torch.ones(10, 10)) for i in range(10)]
)
self.otherparam = nn.Parameter(torch.Tensor(5))
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
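# Small illustration (sketch, not used by the tests): nn.ParameterList supports both
# plain iteration and integer indexing, which is exactly what ParameterModule.forward
# relies on above.
def _parameterlist_demo():
    params = nn.ParameterList([nn.Parameter(torch.zeros(2, 2)) for _ in range(3)])
    shapes = [p.shape for p in params]  # iterate like a list
    second = params[1]                  # or index with an int
    return shapes, second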
class Sequence(nn.Module):
def __init__(self):
super(Sequence, self).__init__()
self.lstm1 = nn.LSTMCell(1, 51)
self.lstm2 = nn.LSTMCell(51, 51)
self.linear = nn.Linear(51, 1)
def forward(self, input, future=0):
outputs = []
h_t = dummy_torch_tensor((input.size(0), 51))
c_t = dummy_torch_tensor((input.size(0), 51))
h_t2 = dummy_torch_tensor((input.size(0), 51))
c_t2 = dummy_torch_tensor((input.size(0), 51))
for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
for i in range(future): # if we should predict the future
h_t, c_t = self.lstm1(output, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
outputs = torch.stack(outputs, 1).squeeze(2)
return outputs
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def init_conv_weights(layer, weights_std=0.01, bias=0):
"""Initialize weights for subnet convolution"""
nn.init.normal_(layer.weight.data, std=weights_std)
nn.init.constant_(layer.bias.data, val=bias)
return layer
def conv3x3(in_channels, out_channels, **kwargs):
"""Return a 3x3 convolutional layer for SubNet"""
layer = nn.Conv2d(in_channels, out_channels, kernel_size=3, **kwargs)
layer = init_conv_weights(layer)
return layer
def test_all_logging(live_mock_server, test_settings, parse_ctx):
# TODO(jhr): does not work with --flake-finder
run = wandb.init(settings=test_settings)
net = ConvNet()
wandb.watch(net, log="all", log_freq=1)
for i in range(3):
output = net(dummy_torch_tensor((32, 1, 28, 28)))
grads = torch.ones(32, 10)
output.backward(grads)
run.log({"a": 2})
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
assert len(ctx_util.history) == 3
for i in range(3):
ctx_util.history[i]["_step"] == i
assert len(ctx_util.history[i]) == 20
assert len(ctx_util.history[i]["parameters/fc2.bias"]["bins"]) == 65
assert len(ctx_util.history[i]["gradients/fc2.bias"]["bins"]) == 65
def test_double_log(wandb_init_run):
net = ConvNet()
wandb.watch(net, log_graph=True)
with pytest.raises(ValueError):
wandb.watch(net, log_graph=True)
def test_embedding_dict_watch(live_mock_server, test_settings, parse_ctx):
run = wandb.init(settings=test_settings)
model = EmbModelWrapper()
wandb.watch(model, log_freq=1, idx=0)
opt = torch.optim.Adam(params=model.parameters())
inp = torch.randint(16, [8, 5])
out = model(inp)
out = (out["key"]["emb1"]).sum(-1)
loss = F.mse_loss(out, inp.float())
loss.backward()
opt.step()
run.log({"loss": loss})
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
print(ctx_util.history)
assert len(ctx_util.history[0]["gradients/emb.emb1.weight"]["bins"]) == 65
assert ctx_util.history[0]["gradients/emb.emb1.weight"]["_type"] == "histogram"
@pytest.mark.timeout(120)
def test_sequence_net(wandb_init_run):
net = Sequence()
graph = wandb.watch(net, log_graph=True)[0]
output = net.forward(dummy_torch_tensor((97, 100)))
output.backward(torch.zeros((97, 100)))
graph = graph._to_graph_json()
assert len(graph["nodes"]) == 3
assert len(graph["nodes"][0]["parameters"]) == 4
assert graph["nodes"][0]["class_name"] == "LSTMCell(1, 51)"
assert graph["nodes"][0]["name"] == "lstm1"
@pytest.mark.skipif(
sys.platform == "darwin", reason="TODO: [Errno 24] Too many open files?!?"
)
def test_multi_net(wandb_init_run):
net1 = ConvNet()
net2 = ConvNet()
graphs = wandb.watch((net1, net2), log_graph=True)
output1 = net1.forward(dummy_torch_tensor((64, 1, 28, 28)))
output2 = net2.forward(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output1.backward(grads)
output2.backward(grads)
graph1 = graphs[0]._to_graph_json()
graph2 = graphs[1]._to_graph_json()
assert len(graph1["nodes"]) == 5
assert len(graph2["nodes"]) == 5
def test_nested_shape():
shape = wandb.wandb_torch.nested_shape([2, 4, 5])
assert shape == [[], [], []]
shape = wandb.wandb_torch.nested_shape(
[dummy_torch_tensor((2, 3)), dummy_torch_tensor((4, 5))]
)
assert shape == [[2, 3], [4, 5]]
# create recursive lists of tensors (t3 includes itself)
t1 = dummy_torch_tensor((2, 3))
t2 = dummy_torch_tensor((4, 5))
t3 = [t1, t2]
t3.append(t3)
t3.append(t2)
shape = wandb.wandb_torch.nested_shape([t1, t2, t3])
assert shape == [[2, 3], [4, 5], [[2, 3], [4, 5], 0, [4, 5]]]
@pytest.mark.parametrize(
"test_input,expected",
[
(torch.Tensor([1.0, 2.0, 3.0]), False),
(torch.Tensor([0.0, 0.0, 0.0]), False),
(torch.Tensor([1.0]), False),
(torch.Tensor([]), True),
(torch.Tensor([1.0, float("nan"), float("nan")]), False),
(torch.Tensor([1.0, float("inf"), -float("inf")]), False),
(torch.Tensor([1.0, float("nan"), float("inf")]), False),
(torch.Tensor([float("nan"), float("nan"), float("nan")]), True),
(torch.Tensor([float("inf"), float("inf"), -float("inf")]), True),
(torch.Tensor([float("nan"), float("inf"), -float("inf")]), True),
],
)
def test_no_finite_values(test_input, expected, wandb_init_run):
torch_history = wandb.wandb_torch.TorchHistory()
assert torch_history._no_finite_values(test_input) is expected
@pytest.mark.parametrize(
"test_input,expected",
[
(torch.Tensor([0.0, 1.0, 2.0]), torch.Tensor([0.0, 1.0, 2.0])),
(torch.Tensor([1.0]), torch.Tensor([1.0])),
(torch.Tensor([0.0, float("inf"), -float("inf")]), torch.Tensor([0.0])),
(torch.Tensor([0.0, float("nan"), float("inf")]), torch.Tensor([0.0])),
],
)
def test_remove_infs_nans(test_input, expected, wandb_init_run):
torch_history = wandb.wandb_torch.TorchHistory()
assert torch.equal(torch_history._remove_infs_nans(test_input), expected)
| 32.289655
| 83
| 0.61352
|
8347ef1a445c89b3c8950c6d513a850a7d1a2a59
| 842
|
py
|
Python
|
SPViT_Swin/data/samplers.py
|
zhuang-group/SPViT
|
74f08c6e55fb6adc0322722cedfd2c25ebdee999
|
[
"Apache-2.0"
] | 69
|
2021-11-23T23:44:38.000Z
|
2022-03-15T01:27:30.000Z
|
SPViT_Swin/data/samplers.py
|
zip-group/SPViT
|
74f08c6e55fb6adc0322722cedfd2c25ebdee999
|
[
"Apache-2.0"
] | null | null | null |
SPViT_Swin/data/samplers.py
|
zip-group/SPViT
|
74f08c6e55fb6adc0322722cedfd2c25ebdee999
|
[
"Apache-2.0"
] | 10
|
2021-11-25T01:26:38.000Z
|
2022-03-14T04:59:42.000Z
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Modifications copyright (c) 2021 Zhuang AI Group, Haoyu He
import torch
class SubsetRandomSampler(torch.utils.data.Sampler):
r"""Samples elements randomly from a given list of indices, without replacement.
Arguments:
indices (sequence): a sequence of indices
"""
def __init__(self, indices):
self.epoch = 0
self.indices = indices
def __iter__(self):
return (self.indices[i] for i in torch.randperm(len(self.indices)))
def __len__(self):
return len(self.indices)
def set_epoch(self, epoch):
self.epoch = epoch
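# Usage sketch (an assumption, not part of the original file): plug the sampler into
# a DataLoader. Any indexable torch Dataset works here; the stride-2 index list is
# only for illustration.
def build_subset_loader(dataset, batch_size=32):
    indices = list(range(0, len(dataset), 2))
    sampler = SubsetRandomSampler(indices)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)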
| 27.16129
| 84
| 0.585511
|
e893ec9131e02d63616c5510a1a5d0f8ae282fb7
| 731
|
py
|
Python
|
foolbox/__init__.py
|
mkyybx/foolbox
|
00b2dcc5ed30b12f28431e9dabe4d2bbc214d444
|
[
"MIT"
] | 4
|
2021-11-12T04:06:32.000Z
|
2022-01-27T09:01:41.000Z
|
foolbox/__init__.py
|
pige2nd/foolbox
|
2daabba8355afce9dfbec3de8d71dadadcfbd10b
|
[
"MIT"
] | 1
|
2022-02-22T14:00:59.000Z
|
2022-02-25T08:57:29.000Z
|
foolbox/__init__.py
|
pige2nd/foolbox
|
2daabba8355afce9dfbec3de8d71dadadcfbd10b
|
[
"MIT"
] | 2
|
2020-11-27T00:03:48.000Z
|
2020-11-27T00:08:04.000Z
|
from os.path import join, dirname
with open(join(dirname(__file__), 'VERSION')) as f:
__version__ = f.read().strip()
from .rngs import rng # noqa: F401
from .rngs import nprng # noqa: F401
from .rngs import set_seeds # noqa: F401
from . import models # noqa: F401
from . import criteria # noqa: F401
from . import distances # noqa: F401
from . import attacks # noqa: F401
from . import batch_attacks # noqa: F401
from . import utils # noqa: F401
from . import gradient_estimators # noqa: F401
from .adversarial import Adversarial # noqa: F401
from .yielding_adversarial import YieldingAdversarial # noqa: F401
from .batching import run_parallel # noqa: F401
from .batching import run_sequential # noqa: F401
| 31.782609
| 67
| 0.733242
|
4d720f7125dc13f1ce2ea73da6991b36621b99d8
| 15,382
|
py
|
Python
|
mne/io/eeglab/tests/test_eeglab.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
mne/io/eeglab/tests/test_eeglab.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
mne/io/eeglab/tests/test_eeglab.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Mikolaj Magnuski <mmagnuski@swps.edu.pl>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
from copy import deepcopy
from distutils.version import LooseVersion
import os.path as op
import shutil
from unittest import SkipTest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_allclose)
import pytest
from scipy import io
from mne import write_events, read_epochs_eeglab
from mne.io import read_raw_eeglab
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets import testing
from mne.utils import requires_h5py, run_tests_if_main
from mne.annotations import events_from_annotations, read_annotations
from mne.io.eeglab.tests._utils import _read_eeglab_montage
base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
raw_fname_mat = op.join(base_dir, 'test_raw.set')
raw_fname_onefile_mat = op.join(base_dir, 'test_raw_onefile.set')
raw_fname_event_duration = op.join(base_dir, 'test_raw_event_duration.set')
epochs_fname_mat = op.join(base_dir, 'test_epochs.set')
epochs_fname_onefile_mat = op.join(base_dir, 'test_epochs_onefile.set')
raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat]
epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat]
raw_fname_h5 = op.join(base_dir, 'test_raw_h5.set')
raw_fname_onefile_h5 = op.join(base_dir, 'test_raw_onefile_h5.set')
epochs_fname_h5 = op.join(base_dir, 'test_epochs_h5.set')
epochs_fname_onefile_h5 = op.join(base_dir, 'test_epochs_onefile_h5.set')
raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5]
epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5]
raw_fnames = [raw_fname_mat, raw_fname_onefile_mat,
raw_fname_h5, raw_fname_onefile_h5]
montage_path = op.join(base_dir, 'test_chans.locs')
def _check_h5(fname):
if fname.endswith('_h5.set'):
try:
import h5py # noqa, analysis:ignore
except Exception:
raise SkipTest('h5py module required')
@requires_h5py
@testing.requires_testing_data
@pytest.mark.parametrize(
'fname', [raw_fname_mat, raw_fname_h5], ids=op.basename
)
def test_io_set_raw(fname):
"""Test importing EEGLAB .set files."""
montage = _read_eeglab_montage(montage_path)
montage.ch_names = [
'EEG {0:03d}'.format(ii) for ii in range(len(montage.ch_names))
]
_test_raw_reader(read_raw_eeglab, input_fname=fname)
# test that preloading works
raw0 = read_raw_eeglab(input_fname=fname, preload=True)
raw0.set_montage(montage)
raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto',
phase='zero')
# test that using uint16_codec does not break stuff
raw0 = read_raw_eeglab(input_fname=fname,
preload=False, uint16_codec='ascii')
raw0.set_montage(montage, update_ch_names=True)
@testing.requires_testing_data
def test_io_set_raw_more(tmpdir):
"""Test importing EEGLAB .set files."""
tmpdir = str(tmpdir)
# test reading file with one event (read old version)
eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
squeeze_me=True)['EEG']
# test negative event latencies
negative_latency_fname = op.join(tmpdir, 'test_negative_latency.set')
evnts = deepcopy(eeg.event[0])
evnts.latency = 0
io.savemat(negative_latency_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan,
'data': 'test_negative_latency.fdt',
'epoch': eeg.epoch, 'event': evnts,
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
negative_latency_fname.replace('.set', '.fdt'))
with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
read_raw_eeglab(input_fname=negative_latency_fname, preload=True)
evnts.latency = -1
io.savemat(negative_latency_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan,
'data': 'test_negative_latency.fdt',
'epoch': eeg.epoch, 'event': evnts,
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
with pytest.raises(ValueError, match='event sample index is negative'):
with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
read_raw_eeglab(input_fname=negative_latency_fname, preload=True)
# test overlapping events
overlap_fname = op.join(tmpdir, 'test_overlap_event.set')
io.savemat(overlap_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan, 'data': 'test_overlap_event.fdt',
'epoch': eeg.epoch,
'event': [eeg.event[0], eeg.event[0]],
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
overlap_fname.replace('.set', '.fdt'))
# test reading file with one channel
one_chan_fname = op.join(tmpdir, 'test_one_channel.set')
io.savemat(one_chan_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': 1, 'data': np.random.random((1, 3)),
'epoch': eeg.epoch, 'event': eeg.epoch,
'chanlocs': {'labels': 'E1', 'Y': -6.6069,
'X': 6.3023, 'Z': -2.9423},
'times': eeg.times[:3], 'pnts': 3}},
appendmat=False, oned_as='row')
read_raw_eeglab(input_fname=one_chan_fname, preload=True)
# test reading file with 3 channels - one without position information
# first, create chanlocs structured array
ch_names = ['F3', 'unknown', 'FPz']
x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
nopos_dt = [('labels', 'S10'), ('Z', 'f8')]
chanlocs = np.zeros((3,), dtype=dt)
nopos_chanlocs = np.zeros((3,), dtype=nopos_dt)
for ind, vals in enumerate(zip(ch_names, x, y, z)):
for fld in range(4):
chanlocs[ind][dt[fld][0]] = vals[fld]
if fld in (0, 3):
nopos_chanlocs[ind][dt[fld][0]] = vals[fld]
# In theory this should work and be simpler, but there is an obscure
# SciPy writing bug that pops up sometimes:
# nopos_chanlocs = np.array(chanlocs[['labels', 'Z']])
if LooseVersion(np.__version__) == '1.14.0':
# There is a bug in 1.14.0 (or maybe with SciPy 1.0.0?) that causes
# this write to fail!
raise SkipTest('Need to fix bug in NumPy 1.14.0!')
# test reading channel names but not positions when there is no X (only Z)
# field in the EEG.chanlocs structure
nopos_fname = op.join(tmpdir, 'test_no_chanpos.set')
io.savemat(nopos_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3,
'data': np.random.random((3, 2)), 'epoch': eeg.epoch,
'event': eeg.epoch, 'chanlocs': nopos_chanlocs,
'times': eeg.times[:2], 'pnts': 2}},
appendmat=False, oned_as='row')
# load the file
raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
# test that channel names have been loaded but not channel positions
for i in range(3):
assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
assert_array_equal(raw.info['chs'][i]['loc'][:3],
np.array([np.nan, np.nan, np.nan]))
@pytest.mark.timeout(60) # ~60 sec on Travis OSX
@requires_h5py
@testing.requires_testing_data
@pytest.mark.parametrize('fnames', [epochs_mat_fnames, epochs_h5_fnames])
def test_io_set_epochs(fnames):
"""Test importing EEGLAB .set epochs files."""
epochs_fname, epochs_fname_onefile = fnames
with pytest.warns(RuntimeWarning, match='multiple events'):
epochs = read_epochs_eeglab(epochs_fname)
with pytest.warns(RuntimeWarning, match='multiple events'):
epochs2 = read_epochs_eeglab(epochs_fname_onefile)
# one warning for each read_epochs_eeglab because both files have epochs
# associated with multiple events
assert_array_equal(epochs.get_data(), epochs2.get_data())
@testing.requires_testing_data
def test_io_set_epochs_events(tmpdir):
"""Test different combinations of events and event_ids."""
tmpdir = str(tmpdir)
out_fname = op.join(tmpdir, 'test-eve.fif')
events = np.array([[4, 0, 1], [12, 0, 2], [20, 0, 3], [26, 0, 3]])
write_events(out_fname, events)
event_id = {'S255/S8': 1, 'S8': 2, 'S255/S9': 3}
out_fname = op.join(tmpdir, 'test-eve.fif')
epochs = read_epochs_eeglab(epochs_fname_mat, events, event_id)
assert_equal(len(epochs.events), 4)
assert epochs.preload
assert epochs._bad_dropped
epochs = read_epochs_eeglab(epochs_fname_mat, out_fname, event_id)
pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat,
None, event_id)
pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat,
epochs.events, None)
@testing.requires_testing_data
def test_degenerate(tmpdir):
"""Test some degenerate conditions."""
# test if .dat file raises an error
tmpdir = str(tmpdir)
eeg = io.loadmat(epochs_fname_mat, struct_as_record=False,
squeeze_me=True)['EEG']
eeg.data = 'epochs_fname.dat'
bad_epochs_fname = op.join(tmpdir, 'test_epochs.set')
io.savemat(bad_epochs_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan, 'data': eeg.data,
'epoch': eeg.epoch, 'event': eeg.event,
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
shutil.copyfile(op.join(base_dir, 'test_epochs.fdt'),
op.join(tmpdir, 'test_epochs.dat'))
with pytest.warns(RuntimeWarning, match='multiple events'):
pytest.raises(NotImplementedError, read_epochs_eeglab,
bad_epochs_fname)
@pytest.mark.parametrize("fname", raw_fnames)
@testing.requires_testing_data
def test_eeglab_annotations(fname):
"""Test reading annotations in EEGLAB files."""
_check_h5(fname)
annotations = read_annotations(fname)
assert len(annotations) == 154
assert set(annotations.description) == {'rt', 'square'}
assert np.all(annotations.duration == 0.)
@testing.requires_testing_data
def test_eeglab_read_annotations():
"""Test annotations onsets are timestamps (+ validate some)."""
annotations = read_annotations(raw_fname_mat)
validation_samples = [0, 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
expected_onset = np.array([1.00, 1.69, 2.08, 4.70, 7.71, 11.30, 17.18,
20.20, 26.12, 29.14, 35.25, 44.30, 47.15])
assert annotations.orig_time is None
assert_array_almost_equal(annotations.onset[validation_samples],
expected_onset, decimal=2)
# test if event durations are imported correctly
raw = read_raw_eeglab(raw_fname_event_duration, preload=True)
# file contains 3 annotations with 0.5 s (64 samples) duration each
assert_allclose(raw.annotations.duration, np.ones(3) * 0.5)
@testing.requires_testing_data
def test_eeglab_event_from_annot():
"""Test all forms of obtaining annotations."""
base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
raw_fname_mat = op.join(base_dir, 'test_raw.set')
raw_fname = raw_fname_mat
event_id = {'rt': 1, 'square': 2}
raw1 = read_raw_eeglab(input_fname=raw_fname, preload=False)
annotations = read_annotations(raw_fname)
assert len(raw1.annotations) == 154
raw1.set_annotations(annotations)
events_b, _ = events_from_annotations(raw1, event_id=event_id)
assert len(events_b) == 154
def _assert_array_allclose_nan(left, right):
assert_array_equal(np.isnan(left), np.isnan(right))
assert_allclose(left[~np.isnan(left)], right[~np.isnan(left)], atol=1e-8)
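def test_assert_array_allclose_nan_sketch():
    """Hedged self-check (not in the original suite) of the helper above."""
    # NaNs must occupy the same positions and the finite entries must agree to
    # within the helper's absolute tolerance of 1e-8.
    left = np.array([1., np.nan, 3.])
    right = np.array([1., np.nan, 3. + 1e-10])
    _assert_array_allclose_nan(left, right)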
@pytest.fixture(scope='session')
def one_chanpos_fname(tmpdir_factory):
"""Test file with 3 channels to exercise EEGLAB reader.
File characteristics
- ch_names: 'F3', 'unknown', 'FPz'
- 'FPz' has no position information.
- the rest is aleatory
Notes from when this code was factorized:
# test reading file with one event (read old version)
"""
fname = str(tmpdir_factory.mktemp('data').join('test_chanpos.set'))
    file_content = dict(EEG={
'trials': 1, 'nbchan': 3, 'pnts': 3, 'epoch': [], 'event': [],
'srate': 128, 'times': np.array([0., 0.1, 0.2]),
'data': np.empty([3, 3]),
'chanlocs': np.array(
[(b'F3', 1., 4., 7.),
(b'unknown', 2., 5., 8.),
(b'FPz', np.nan, np.nan, np.nan)],
dtype=[('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
)
})
    io.savemat(file_name=fname, mdict=file_content, appendmat=False,
oned_as='row')
return fname
@testing.requires_testing_data
def test_position_information(one_chanpos_fname):
"""Test reading file with 3 channels - one without position information."""
nan = np.nan
EXPECTED_LOCATIONS_FROM_FILE = np.array([
[-4., 1., 7., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[-5., 2., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
])
EXPECTED_LOCATIONS_FROM_MONTAGE = np.array([
[-0.56705965, 0.67706631, 0.46906776, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
[0, 0.99977915, -0.02101571, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
montage = _read_eeglab_montage(montage_path)
raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
assert_array_equal(np.array([ch['loc'] for ch in raw.info['chs']]),
EXPECTED_LOCATIONS_FROM_FILE)
    # To accommodate the new behavior so that:
    # read_raw_eeglab(.. montage=montage) and raw.set_montage(montage)
    # behave the same, we need to flush the montage; otherwise we get
    # a mix of what is in montage and in the file
raw = read_raw_eeglab(
input_fname=one_chanpos_fname,
preload=True,
).set_montage(None) # Flush the montage builtin within input_fname
with pytest.raises(ValueError):
raw.set_montage(montage, update_ch_names=False)
_msg = (
'DigMontage is a only a subset of info. '
'Did not set 1 channel positions:\nunknown'
)
with pytest.warns(RuntimeWarning, match=_msg):
raw.set_montage(montage, update_ch_names=False, raise_if_subset=False)
_assert_array_allclose_nan(np.array([ch['loc'] for ch in raw.info['chs']]),
EXPECTED_LOCATIONS_FROM_MONTAGE)
run_tests_if_main()
| 42.142466
| 79
| 0.640554
|
d7745a84695df9a56808881d11f44f78afa54f84
| 1,964
|
py
|
Python
|
xortool/routine.py
|
emilbayes/xortool
|
efa8462251afad2156ceed9b6ee1f3a6a98ed89c
|
[
"MIT"
] | null | null | null |
xortool/routine.py
|
emilbayes/xortool
|
efa8462251afad2156ceed9b6ee1f3a6a98ed89c
|
[
"MIT"
] | null | null | null |
xortool/routine.py
|
emilbayes/xortool
|
efa8462251afad2156ceed9b6ee1f3a6a98ed89c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
#-*- coding:utf-8 -*-
import os
import sys
import string
class MkdirError(Exception):
pass
def load_file(filename):
if filename == "-":
return sys.stdin.read()
fd = open(filename, "rb")
contents = fd.read()
fd.close()
return contents
def save_file(filename, data):
fd = open(filename, "wb")
fd.write(data)
fd.close()
return
def mkdir(dirname):
if os.path.exists(dirname):
return
try:
os.mkdir(dirname)
except BaseException as err:
raise MkdirError(str(err))
return
def rmdir(dirname):
if dirname[-1] == os.sep:
dirname = dirname[:-1]
if os.path.islink(dirname):
return # do not clear link - we can get out of dir
files = os.listdir(dirname)
for f in files:
if f == '.' or f == '..':
continue
path = dirname + os.sep + f
if os.path.isdir(path):
rmdir(path)
else:
os.unlink(path)
os.rmdir(dirname)
return
def decode_from_hex(text):
only_hex_digits = "".join([c for c in text if c in string.hexdigits])
return only_hex_digits.decode("hex")
def parse_char(ch):
"""
'A' or '\x41' or '41'
"""
if len(ch) == 1:
return ord(ch)
if ch[0:2] == "\\x":
ch = ch[2:]
if not ch:
raise ValueError("Empty char")
return ord(chr(int(ch, 16)))
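def _parse_char_examples():
    # Hedged self-check (not part of the original xortool): the three spellings
    # documented in parse_char() all denote the same key byte 0x41 ('A').
    assert parse_char("A") == parse_char("\\x41") == parse_char("41") == 0x41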
def dexor(text, key):
ret = list(text)
mod = len(key)
for index, char in enumerate(ret):
        ret[index] = chr(ord(char) ^ ord(key[index % mod]))
return "".join(ret)
def die(exitMessage, exitCode=1):
print(exitMessage)
sys.exit(exitCode)
def is_linux():
return sys.platform.startswith("linux")
def alphanum(s):
lst = list(s)
for index, char in enumerate(lst):
if char in (string.ascii_letters + string.digits):
continue
lst[index] = char.encode("hex")
return "".join(lst)
| 19.838384
| 73
| 0.570774
|
b1133c1a8d6aa1372328dcdb2198bda04f2d31ae
| 70
|
py
|
Python
|
rxntools/__init__.py
|
Xiangyan93/rxntools
|
b2e3437c70528675cbd019d0c5b6767bb2578151
|
[
"MIT"
] | null | null | null |
rxntools/__init__.py
|
Xiangyan93/rxntools
|
b2e3437c70528675cbd019d0c5b6767bb2578151
|
[
"MIT"
] | null | null | null |
rxntools/__init__.py
|
Xiangyan93/rxntools
|
b2e3437c70528675cbd019d0c5b6767bb2578151
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.0.2'
| 11.666667
| 23
| 0.557143
|
fa05523da252ef531644d9205021bdfb4b976b37
| 10,207
|
py
|
Python
|
packages/pegasus-python/test/cli/test_pegasus_graphviz.py
|
dhruvaop/pegasus-2
|
0ad4d93ca42d54713086ba8729f200c69ab6574d
|
[
"Apache-2.0"
] | null | null | null |
packages/pegasus-python/test/cli/test_pegasus_graphviz.py
|
dhruvaop/pegasus-2
|
0ad4d93ca42d54713086ba8729f200c69ab6574d
|
[
"Apache-2.0"
] | null | null | null |
packages/pegasus-python/test/cli/test_pegasus_graphviz.py
|
dhruvaop/pegasus-2
|
0ad4d93ca42d54713086ba8729f200c69ab6574d
|
[
"Apache-2.0"
] | null | null | null |
import importlib
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
from Pegasus.api import *
pegasus_graphviz = importlib.import_module("Pegasus.cli.pegasus-graphviz")
graph_node = pegasus_graphviz.Node
@pytest.fixture(scope="module")
def diamond_wf_file():
fa = File("f.a")
fb1 = File("f.b1")
fb2 = File("f.b2")
fc1 = File("f.c1")
fc2 = File("f.c2")
fd = File("f.d")
preprocess_checkpoint = File("preprocess_checkpoint.pkl")
Workflow("blackdiamond").add_jobs(
Job("preprocess", node_label="level1")
.add_inputs(fa)
.add_outputs(fb1, fb2)
.add_checkpoint(preprocess_checkpoint),
Job("findrange").add_inputs(fb1).add_outputs(fc1),
Job("findrange").add_inputs(fb2).add_outputs(fc2),
Job("analyze").add_inputs(fc1, fc2).add_outputs(fd),
).write("workflow.yml")
yield "workflow.yml"
Path("workflow.yml").unlink()
@pytest.fixture(scope="module")
def hierarchical_wf_file():
analysis_out_file = File("analysis_output")
h_wf = Workflow("hierarchical-wf")
analysis_wf_job = SubWorkflow(
"analysis-wf.yml", node_label="subwf1", is_planned=False
).add_outputs(analysis_out_file)
sleep_wf_job = SubWorkflow("sleep-wf.yml", node_label="subwf2", is_planned=False)
ls_job = Job("ls", _id="ls").add_inputs(analysis_out_file).set_stdout("ls_out.txt")
h_wf.add_jobs(analysis_wf_job, sleep_wf_job, ls_job)
h_wf.add_dependency(sleep_wf_job, children=[ls_job])
h_wf.write("h_workflow.yml")
yield "h_workflow.yml"
Path("h_workflow.yml").unlink()
@pytest.mark.parametrize(
"wf, expected_node_children",
[
(
Workflow("test").add_jobs(
Job("test", _id="j1").add_inputs("f1").add_outputs("f2")
),
{"j1": [graph_node("f2")], "f1": [graph_node("j1")], "f2": []},
),
(
Workflow("test").add_jobs(
Job("test", _id="j1").add_outputs("f1"),
Job("test", _id="j2").add_inputs("f1"),
),
{"j1": [graph_node("f1")], "f1": [graph_node("j2")], "j2": []},
),
(
Workflow("test").add_jobs(
Job("test", _id="j1").add_outputs("f1"),
Job("test", _id="j2").add_inputs("f1").add_outputs("f2"),
Job("test", _id="j3").add_inputs("f1", "f2"),
),
{
"j1": [graph_node("f1")],
"f1": [graph_node("j2"), graph_node("j3")],
"j2": [graph_node("f2")],
"j3": [],
},
),
(
Workflow("test").add_jobs(
Job("test", _id="j1").add_outputs("f1"),
Job("test", _id="j2").add_inputs("f1").add_outputs("f2"),
Job("test", _id="j3").add_inputs("f1", "f2").add_outputs("f3"),
Job("test", _id="j4").add_inputs("f1", "f3"),
),
{
"j1": [graph_node("f1")],
"f1": [graph_node("j2"), graph_node("j3"), graph_node("j4")],
"j2": [graph_node("f2")],
"f2": [graph_node("j3")],
"j3": [graph_node("f3")],
"f3": [graph_node("j4")],
"j4": [],
},
),
],
)
def test_transitivereduction(wf, expected_node_children):
with NamedTemporaryFile(mode="w+") as f:
wf.write(f)
f.seek(0)
dag = pegasus_graphviz.parse_yamlfile(f.name, include_files=True)
dag = pegasus_graphviz.transitivereduction(dag)
# sort each node children list in dag
for k, n in dag.nodes.items():
n.children.sort(key=lambda c: c.id)
# sort each node children list in expected_node_children
for _id, children in expected_node_children.items():
children.sort(key=lambda c: c.id)
# ensure that each node has the expected (reduced) list of children
for _id, children in expected_node_children.items():
assert dag.nodes[_id].children == children
class TestEmitDot:
def test_emit_dot_diamond_wf_yaml_file(self, diamond_wf_file):
# target dot file
dot_file = Path("wf.dot")
# invoke emit_dot on the diamond_wf_file
dag = pegasus_graphviz.parse_yamlfile(diamond_wf_file, include_files=False)
dag = pegasus_graphviz.transitivereduction(dag)
pegasus_graphviz.emit_dot(dag, outfile=str(dot_file), label_type="label-xform")
with dot_file.open("r") as f:
result = f.read()
# check that correct dot file written
assert result == (
"digraph dag {\n"
" ratio=fill\n"
' node [style=filled,color="#444444",fillcolor="#ffed6f"]\n'
" edge [arrowhead=normal,arrowsize=1.0]\n\n"
' "ID0000001" [shape=ellipse,color="#000000",fillcolor="#1b9e77",label="level1\\npreprocess"]\n'
' "ID0000002" [shape=ellipse,color="#000000",fillcolor="#d95f02",label="findrange"]\n'
' "ID0000003" [shape=ellipse,color="#000000",fillcolor="#d95f02",label="findrange"]\n'
' "ID0000004" [shape=ellipse,color="#000000",fillcolor="#7570b3",label="analyze"]\n'
' "ID0000001" -> "ID0000002" [color="#000000"]\n'
' "ID0000001" -> "ID0000003" [color="#000000"]\n'
' "ID0000002" -> "ID0000004" [color="#000000"]\n'
' "ID0000003" -> "ID0000004" [color="#000000"]\n'
"}\n"
)
# cleanup
dot_file.unlink()
def test_emit_dot_diamond_wf_yaml_file_including_file_nodes(self, diamond_wf_file):
# target dot file
dot_file = Path("wf.dot")
# invoke emit_dot on the diamond wf file, specifying that file nodes
# be included
dag = pegasus_graphviz.parse_yamlfile(diamond_wf_file, include_files=True)
dag = pegasus_graphviz.transitivereduction(dag)
pegasus_graphviz.emit_dot(dag, outfile=str(dot_file), label_type="label-id")
with dot_file.open("r") as f:
result = f.read()
# check that correct dot file written
assert result == (
"digraph dag {\n"
" ratio=fill\n"
' node [style=filled,color="#444444",fillcolor="#ffed6f"]\n'
" edge [arrowhead=normal,arrowsize=1.0]\n\n"
' "ID0000001" [shape=ellipse,color="#000000",fillcolor="#1b9e77",label="level1\\nID0000001"]\n'
' "ID0000002" [shape=ellipse,color="#000000",fillcolor="#d95f02",label="ID0000002"]\n'
' "ID0000003" [shape=ellipse,color="#000000",fillcolor="#d95f02",label="ID0000003"]\n'
' "ID0000004" [shape=ellipse,color="#000000",fillcolor="#7570b3",label="ID0000004"]\n'
' "f.a" [shape=rect,color="#000000",fillcolor="#ffed6f",label="f.a"]\n'
' "f.b1" [shape=rect,color="#000000",fillcolor="#ffed6f",label="f.b1"]\n'
' "f.b2" [shape=rect,color="#000000",fillcolor="#ffed6f",label="f.b2"]\n'
' "f.c1" [shape=rect,color="#000000",fillcolor="#ffed6f",label="f.c1"]\n'
' "f.c2" [shape=rect,color="#000000",fillcolor="#ffed6f",label="f.c2"]\n'
' "f.d" [shape=rect,color="#000000",fillcolor="#ffed6f",label="f.d"]\n'
' "preprocess_checkpoint.pkl" [shape=rect,color="#000000",fillcolor="#ffed6f",label="preprocess_checkpoint.pkl"]\n'
' "ID0000001" -> "f.b2" [color="#000000"]\n'
' "ID0000001" -> "preprocess_checkpoint.pkl" [color="#000000"]\n'
' "ID0000001" -> "f.b1" [color="#000000"]\n'
' "ID0000002" -> "f.c1" [color="#000000"]\n'
' "ID0000003" -> "f.c2" [color="#000000"]\n'
' "ID0000004" -> "f.d" [color="#000000"]\n'
' "f.a" -> "ID0000001" [color="#000000"]\n'
' "f.b1" -> "ID0000002" [color="#000000"]\n'
' "f.b2" -> "ID0000003" [color="#000000"]\n'
' "f.c1" -> "ID0000004" [color="#000000"]\n'
' "f.c2" -> "ID0000004" [color="#000000"]\n'
"}\n"
)
# cleanup
dot_file.unlink()
def test_emit_dot_hierarchical_wf_yaml_file(self, hierarchical_wf_file):
# target dot file
dot_file = Path("wf.dot")
# invoke emit_dot on the diamond_wf_file
dag = pegasus_graphviz.parse_yamlfile(hierarchical_wf_file, include_files=True)
dag = pegasus_graphviz.transitivereduction(dag)
pegasus_graphviz.emit_dot(dag, outfile=str(dot_file), label_type="label-xform")
with dot_file.open("r") as f:
result = f.read()
# check that correct dot file written
assert result == (
"digraph dag {\n"
" ratio=fill\n"
' node [style=filled,color="#444444",fillcolor="#ffed6f"]\n'
" edge [arrowhead=normal,arrowsize=1.0]\n\n"
' "ID0000001" [shape=ellipse,color="#000000",fillcolor="#1b9e77",label="subwf1\\nanalysis-wf.yml"]\n'
' "ID0000002" [shape=ellipse,color="#000000",fillcolor="#d95f02",label="subwf2\\nsleep-wf.yml"]\n'
' "analysis-wf.yml" [shape=rect,color="#000000",fillcolor="#ffed6f",label="analysis-wf.yml"]\n'
' "analysis_output" [shape=rect,color="#000000",fillcolor="#ffed6f",label="analysis_output"]\n'
' "ls" [shape=ellipse,color="#000000",fillcolor="#7570b3",label="ls"]\n'
' "ls_out.txt" [shape=rect,color="#000000",fillcolor="#ffed6f",label="ls_out.txt"]\n'
' "sleep-wf.yml" [shape=rect,color="#000000",fillcolor="#ffed6f",label="sleep-wf.yml"]\n'
' "ID0000001" -> "analysis_output" [color="#000000"]\n'
' "ID0000002" -> "ls" [color="#000000"]\n'
' "analysis-wf.yml" -> "ID0000001" [color="#000000"]\n'
' "analysis_output" -> "ls" [color="#000000"]\n'
' "ls" -> "ls_out.txt" [color="#000000"]\n'
' "sleep-wf.yml" -> "ID0000002" [color="#000000"]\n'
"}\n"
)
# cleanup
dot_file.unlink()
| 41.49187
| 130
| 0.559518
|
3ccf12da23ac62278e7facac2c7233e6567ce2f2
| 5,585
|
py
|
Python
|
dask_cuda/benchmarks/local_cupy_transpose_sum.py
|
efajardo-nv/dask-cuda
|
b5ae4fc872b057f9d71595fccaa6dfd61654e301
|
[
"Apache-2.0"
] | null | null | null |
dask_cuda/benchmarks/local_cupy_transpose_sum.py
|
efajardo-nv/dask-cuda
|
b5ae4fc872b057f9d71595fccaa6dfd61654e301
|
[
"Apache-2.0"
] | null | null | null |
dask_cuda/benchmarks/local_cupy_transpose_sum.py
|
efajardo-nv/dask-cuda
|
b5ae4fc872b057f9d71595fccaa6dfd61654e301
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import asyncio
from collections import defaultdict
from time import perf_counter as clock
import dask.array as da
from dask.distributed import Client, performance_report, wait
from dask.utils import format_bytes, format_time, parse_bytes
from dask_cuda.local_cuda_cluster import LocalCUDACluster
import cupy
import numpy as np
async def run(args):
# Set up workers on the local machine
async with LocalCUDACluster(
protocol=args.protocol,
n_workers=len(args.devs.split(",")),
CUDA_VISIBLE_DEVICES=args.devs,
ucx_net_devices="auto",
enable_infiniband=True,
enable_nvlink=True,
asynchronous=True,
) as cluster:
async with Client(cluster, asynchronous=True) as client:
def _worker_setup(size=None):
import rmm
rmm.reinitialize(
pool_allocator=not args.no_rmm_pool,
devices=0,
initial_pool_size=size,
)
cupy.cuda.set_allocator(rmm.rmm_cupy_allocator)
await client.run(_worker_setup)
# Create an RMM pool on the scheduler due to occasional deserialization
# of CUDA objects. May cause issues with InfiniBand otherwise.
await client.run_on_scheduler(_worker_setup, 1e9)
# Create a simple random array
rs = da.random.RandomState(RandomState=cupy.random.RandomState)
x = rs.random((args.size, args.size), chunks=args.chunk_size).persist()
await wait(x)
# Execute the operations to benchmark
if args.profile is not None:
async with performance_report(filename=args.profile):
t1 = clock()
await client.compute((x + x.T).sum())
took = clock() - t1
else:
t1 = clock()
await client.compute((x + x.T).sum())
took = clock() - t1
# Collect, aggregate, and print peer-to-peer bandwidths
incoming_logs = await client.run(
lambda dask_worker: dask_worker.incoming_transfer_log
)
bandwidths = defaultdict(list)
total_nbytes = defaultdict(list)
for k, L in incoming_logs.items():
for d in L:
if d["total"] >= args.ignore_size:
bandwidths[k, d["who"]].append(d["bandwidth"])
total_nbytes[k, d["who"]].append(d["total"])
bandwidths = {
(
cluster.scheduler.workers[w1].name,
cluster.scheduler.workers[w2].name,
): [
"%s/s" % format_bytes(x) for x in np.quantile(v, [0.25, 0.50, 0.75])
]
for (w1, w2), v in bandwidths.items()
}
total_nbytes = {
(
cluster.scheduler.workers[w1].name,
cluster.scheduler.workers[w2].name,
): format_bytes(sum(nb))
for (w1, w2), nb in total_nbytes.items()
}
print("Roundtrip benchmark")
print("--------------------------")
print(f"Size | {args.size}*{args.size}")
print(f"Chunk-size | {args.chunk_size}")
print(f"Ignore-size | {format_bytes(args.ignore_size)}")
print(f"Protocol | {args.protocol}")
print(f"Device(s) | {args.devs}")
print(f"npartitions | {x.npartitions}")
print("==========================")
print(f"Total time | {format_time(took)}")
print("==========================")
print("(w1,w2) | 25% 50% 75% (total nbytes)")
print("--------------------------")
for (d1, d2), bw in sorted(bandwidths.items()):
print(
"(%02d,%02d) | %s %s %s (%s)"
% (d1, d2, bw[0], bw[1], bw[2], total_nbytes[(d1, d2)])
)
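def _bandwidth_quantiles_sketch(samples=(2.1e9, 2.4e9, 2.8e9, 3.0e9)):
    # Minimal sketch (not part of the benchmark) of the aggregation above: the
    # 25th/50th/75th percentiles of per-transfer bandwidths, formatted the same
    # way as in run(). The default sample values are hypothetical bytes/second.
    return ["%s/s" % format_bytes(x) for x in np.quantile(samples, [0.25, 0.50, 0.75])]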
def parse_args():
parser = argparse.ArgumentParser(
description="Transpose on LocalCUDACluster benchmark"
)
parser.add_argument(
"-d", "--devs", default="0", type=str, help='GPU devices to use (default "0").'
)
parser.add_argument(
"-p",
"--protocol",
choices=["tcp", "ucx"],
default="tcp",
type=str,
help="The communication protocol to use.",
)
parser.add_argument(
"-s",
"--size",
default="10000",
metavar="n",
type=int,
help="The size n in n^2 (default 10000)",
)
parser.add_argument(
"-c",
"--chunk-size",
default="128 MiB",
metavar="nbytes",
type=str,
help='Chunk size (default "128 MiB")',
)
parser.add_argument(
"--ignore-size",
default="1 MiB",
metavar="nbytes",
type=parse_bytes,
        help='Ignore messages smaller than this (default "1 MiB")',
)
parser.add_argument(
"--profile",
metavar="PATH",
default=None,
type=str,
help="Write dask profile report (E.g. dask-report.html)",
)
parser.add_argument(
"--no-rmm-pool", action="store_true", help="Disable the RMM memory pool"
)
args = parser.parse_args()
return args
def main():
args = parse_args()
asyncio.get_event_loop().run_until_complete(run(args))
if __name__ == "__main__":
main()
| 33.443114
| 88
| 0.520859
|
ad7f86280d66d5f6689ff0575eccf8ddeeb40e2c
| 514
|
py
|
Python
|
tests/r/test_ethanol.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 199
|
2017-07-24T01:34:27.000Z
|
2022-01-29T00:50:55.000Z
|
tests/r/test_ethanol.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 46
|
2017-09-05T19:27:20.000Z
|
2019-01-07T09:47:26.000Z
|
tests/r/test_ethanol.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 45
|
2017-07-26T00:10:44.000Z
|
2022-03-16T20:44:59.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.ethanol import ethanol
def test_ethanol():
"""Test module ethanol.py by downloading
  ethanol.csv and checking that the shape of the
  extracted data is 88 rows by 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = ethanol(test_path)
try:
assert x_train.shape == (88, 3)
except:
shutil.rmtree(test_path)
    raise
| 21.416667
| 43
| 0.752918
|
d03bc381718a3b77afdf1792234f0e100bfbb35e
| 5,284
|
py
|
Python
|
nonogram.py
|
micvri/python_scripts
|
914b8968db0ccb45ba5695d5acb9840c95f1d9ff
|
[
"MIT"
] | null | null | null |
nonogram.py
|
micvri/python_scripts
|
914b8968db0ccb45ba5695d5acb9840c95f1d9ff
|
[
"MIT"
] | null | null | null |
nonogram.py
|
micvri/python_scripts
|
914b8968db0ccb45ba5695d5acb9840c95f1d9ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#from termcolor import colored
#import copy
import sys
import os
sys.setrecursionlimit(5000)
input_template = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
# x, y different lengths possible
puzzle_input = [
[[2], [4], [2], [2, 2], [1]],
[[4], [4], [2], [1, 1], [1]]
]
len_puzzle_input_0 = len(puzzle_input[0])
len_puzzle_input_1 = len(puzzle_input[1])
puzzle_grid = []
for i in range(0, len(puzzle_input[1]) + 1):
tmp = []
for j in range(0, len(puzzle_input[0]) + 1):
tmp.append(0)
puzzle_grid.append(tmp)
#puzzle_grid = [
# [1,0,0,0,0,0],
# [1,1,0,0,1,0],
# [1,0,0,0,1,0],
# [1,0,1,1,1,0],
# [0,0,1,1,1,0],
# [0,0,0,0,0,0]]
initial_limit = 0
def print_grid():
y_len = 0
x_len = 0
for i in range(0, len_puzzle_input_0):
if len(puzzle_input[0][i]) > x_len:
x_len = len(puzzle_input[0][i])
for i in range(0, len_puzzle_input_1):
if len(puzzle_input[1][i]) > y_len:
y_len = len(puzzle_input[1][i])
for x in range(0, x_len):
for y in range(0, y_len):
print(" ", end="")
print("|", end="")
for i in range(0, len_puzzle_input_0):
if len(puzzle_input[0][i]) < x_len - x:
print(" ", end="")
else:
print(puzzle_input[0][i][x - x_len + len(puzzle_input[0][i])], end="")
print("")
for i in range(0, y_len):
print("-", end="")
print("/", end="")
for i in range(0, len_puzzle_input_0):
print("-", end="")
print("")
for i in range(0, len_puzzle_input_1):
for y in range(0, y_len):
if len(puzzle_input[1][i]) < y_len - y:
print(" ", end="")
else:
print(puzzle_input[1][i][y - y_len + len(puzzle_input[1][i])],
end = "")
print("|", end="")
for j in range(0, len(puzzle_grid[i]) - 1):
print(puzzle_grid[i][j], end="")
print("")
print("")
def check_correct():
for y in range(0, len_puzzle_input_1):
count = 0
i = 0
for x in range(0, len_puzzle_input_0 + 1):
# print("Row:")
# print("x,y =", x+1, y+1)
# print("val =", puzzle_grid[y][x])
if puzzle_grid[y][x] == 1:
count += 1
elif puzzle_grid[y][x - 1] == 1:
# print("prev =", puzzle_grid[y][x - 1])
if i >= len(puzzle_input[1][y]):
# print("c,i =", count, i)
if count > 0:
return False
else:
if count == puzzle_input[1][y][i]:
i += 1
count = 0
else:
return False
# print("c =", count)
# print("i =", i)
# print("")
if i < len(puzzle_input[1][y]):
return False
for x in range(0, len_puzzle_input_0):
count = 0
i = 0
for y in range(0, len_puzzle_input_1 + 1):
# print("Column:")
# print("x,y =", x+1, y+1)
# print("val =", puzzle_grid[y][x])
if puzzle_grid[y][x] == 1:
count += 1
elif puzzle_grid[y - 1][x] == 1:
# print("prev =", puzzle_grid[y - 1][x])
if i >= len(puzzle_input[0][x]):
# print("c,i =", count, i)
if count > 0:
return False
else:
if count == puzzle_input[0][x][i]:
i += 1
count = 0
else:
return False
# print("c =", count)
# print("i =", i)
# print("")
# print("c,i =", count, i)
if i < len(puzzle_input[0][x]):
return False
return True
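def _row_runs(row):
    # Hedged helper sketch (not part of the original script): the run lengths of
    # consecutive 1s in a row, which is what check_correct() compares against
    # the puzzle clues, e.g. _row_runs([1, 1, 0, 1, 0]) == [2, 1].
    runs, count = [], 0
    for cell in row + [0]:
        if cell == 1:
            count += 1
        elif count:
            runs.append(count)
            count = 0
    return runs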
last = (len(puzzle_grid[0]) - 1) * (len(puzzle_grid) - 1)
lookup_position = []
for place in range(0, last):
r = int((place - place % (len(puzzle_grid) - 1))/(len(puzzle_grid) - 1))
c = place % (len(puzzle_grid[0]) - 1)
lookup_position.append([r, c])
def recursion(limit, puzzle_grid):
global last
    place = last - 1
while place >= limit:
for i in range(place, last):
r = lookup_position[place][0]
c = lookup_position[place][1]
#print("")
#print("r,c =", r+1, c+1)
if puzzle_grid[r][c] == 0:
puzzle_grid[r][c] = 1
#if place <= 5:
# print_grid()
place = last - 1
break
else:
puzzle_grid[r][c] = 0
place -= 1
need_check = True
for i in range(0, len_puzzle_input_1):
if need_check:
if sum(puzzle_grid[i]) != sum(puzzle_input[1][i]):
need_check = False
if need_check:
if check_correct():
print_grid()
sys.exit()
# Main
print_grid()
recursion(initial_limit, puzzle_grid)
# recursion() exits via sys.exit() as soon as a solution is found,
# so reaching this point means the search space was exhausted.
print("No solutions found")
| 29.853107
| 86
| 0.442468
|
d3ddc3e9abafb7e1f6fb206987e0d139d1a1a117
| 3,794
|
py
|
Python
|
game_states/Help.py
|
Atsocs/cobrITA
|
3bffe9feafc1da7ed00cc8e8c2914ef6b7ffea02
|
[
"MIT"
] | 1
|
2021-04-09T00:22:45.000Z
|
2021-04-09T00:22:45.000Z
|
game_states/Help.py
|
Atsocs/cobrITA
|
3bffe9feafc1da7ed00cc8e8c2914ef6b7ffea02
|
[
"MIT"
] | 18
|
2021-04-04T17:53:18.000Z
|
2021-05-04T22:28:49.000Z
|
game_states/Help.py
|
Atsocs/cobrITA
|
3bffe9feafc1da7ed00cc8e8c2914ef6b7ffea02
|
[
"MIT"
] | 1
|
2021-05-06T15:46:31.000Z
|
2021-05-06T15:46:31.000Z
|
from game_state_machine.GameState import GameState
from definitions import background_color, L, PX
from utils import sound_path
import pygame
# Inherit from a game_state that does nothing
class Help(GameState):
def __init__(self):
super().__init__()
self.speed_text = "1. The rookie chain (our adapted snake) has a constant non-zero speed."
self.direction_text = "2. For orientation use the arrow keys in your keyboard."
self.pause_text = "3. Press 'esc' to pause (you don't lose progress)."
self.rules_text = "4. To upgrade your score, collect more rookies (1 rookie = 1 score point)."
self.boundary_text = "5. The map boundaries can do you harm, watch out!"
self.final_text = "6. Press any key to go back to the Main Menu."
self.texts = ["Help Menu", self.speed_text, self.direction_text, self.pause_text,
self.rules_text, self.boundary_text, self.final_text]
self.enter_sound = pygame.mixer.Sound(sound_path('enter.ogg'))
def startup(self):
self.update()
def cleanup(self):
pass
def update(self):
self.set_texts()
self.set_rect_centers()
def draw(self, surface):
M = 20
border_rect = pygame.Rect((0, 0), (L * PX - 2 * M, L * PX - 2 * M)).inflate(0, -250)
border_rect.center = self.get_screen_rect().center
surface.fill(background_color)
pygame.draw.rect(surface, "orange", border_rect, width=2, border_radius=1)
surface.blit(self.title, self.title_rect)
surface.blit(self.speed, self.speed_rect)
surface.blit(self.direction, self.direction_rect)
surface.blit(self.pause, self.pause_rect)
surface.blit(self.rules, self.rules_rect)
surface.blit(self.boundary, self.boundary_rect)
surface.blit(self.final, self.final_rect)
def on_key_up(self, e):
self.next_state = "Menu"
self.enter_sound.play()
self.done = True
def on_mouse_up(self, e):
pass
def down(self):
pass
def up(self):
pass
# noinspection DuplicatedCode
def set_texts(self):
f1, f2 = (self.fonts[x] for x in ('h2', 'h3'))
self.title = f1.render(self.texts[0], True, pygame.Color("green"))
self.speed = f2.render(self.texts[1], True, pygame.Color("yellow"))
self.direction = f2.render(self.texts[2], True, pygame.Color("yellow"))
self.pause = f2.render(self.texts[3], True, pygame.Color("yellow"))
self.rules = f2.render(self.texts[4], True, pygame.Color("yellow"))
self.boundary = f2.render(self.texts[5], True, pygame.Color("yellow"))
self.final = f2.render(self.texts[6], True, pygame.Color("red"))
# noinspection DuplicatedCode
def set_rect_centers(self):
self.set_texts()
r = self.get_screen_rect()
K = 30
self.title_center = r.move(0, -3 * K).center
self.speed_center = r.move(0, -2 * K).center
self.direction_center = r.move(0, - 1 * K).center
self.pause_center = r.move(0, 0 * K).center
self.rules_center = r.move(0, 1 * K).center
self.boundary_center = r.move(0, 2 * K).center
self.final_center = r.move(0, 3 * K).center
self.title_rect = self.title.get_rect(center=self.title_center)
self.speed_rect = self.speed.get_rect(center=self.speed_center)
self.direction_rect = self.direction.get_rect(center=self.direction_center)
self.pause_rect = self.pause.get_rect(center=self.pause_center)
self.rules_rect = self.rules.get_rect(center=self.rules_center)
self.boundary_rect = self.boundary.get_rect(center=self.boundary_center)
self.final_rect = self.final.get_rect(center=self.final_center)
| 41.692308
| 102
| 0.648392
|
d905ac721a537fdca46cc7572eff4fe887ed4b19
| 1,244
|
py
|
Python
|
tests/test_simple_glazing.py
|
louisleroy5/archetypal
|
71f13aaed859c10e663e68624e2b74d816de631f
|
[
"MIT"
] | null | null | null |
tests/test_simple_glazing.py
|
louisleroy5/archetypal
|
71f13aaed859c10e663e68624e2b74d816de631f
|
[
"MIT"
] | null | null | null |
tests/test_simple_glazing.py
|
louisleroy5/archetypal
|
71f13aaed859c10e663e68624e2b74d816de631f
|
[
"MIT"
] | null | null | null |
import warnings
import pytest
from archetypal.simple_glazing import calc_simple_glazing
def test_glazing():
"""Simulates a Double Clear Air Glazing System (Window.exe v.7.5)"""
res = calc_simple_glazing(0.704, 2.703, 0.786)
print(res["Thickness"])
def test_absurd():
"""Simulates a Double Clear Air Glazing System (Window.exe v.7.5). Will
raise an error when checking Visible Transmittance at Normal Incidence +
Back Side Visible Reflectance at Normal Incidence not <= 1.0"""
with pytest.warns(UserWarning):
calc_simple_glazing(0.704, 2.703, 0.9)
@pytest.mark.parametrize(
"another",
[
calc_simple_glazing(0.5, 2.2, 0.21),
calc_simple_glazing(0.6, 2.2),
calc_simple_glazing(0.8, 2.2, 0.35),
calc_simple_glazing(1.2, 0.1, 10),
],
)
def test_glazing_unequal(another):
t1 = calc_simple_glazing(0.6, 2.2, 0.21)
assert t1 != another
def test_simple_glazing_system_equal():
dict = calc_simple_glazing(0.6, 2.2, 0.21)
assert dict["Conductivity"] == 0.11992503503877955
def test_simple_glazing_value_error():
# Should raise an error since u-value is higher than 7
with pytest.raises(ValueError):
calc_simple_glazing(1.2, 8, 10)
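def test_simple_glazing_returns_keys_used_above():
    # Hedged sketch (not part of the original suite): the keys exercised
    # elsewhere in this file ("Thickness", "Conductivity") should be present
    # for a typical double-glazing input.
    res = calc_simple_glazing(0.704, 2.703, 0.786)
    assert "Thickness" in res and "Conductivity" in res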
| 27.644444
| 76
| 0.688907
|
231af0abcba7c88fe48182f59d82aacb3db0dd35
| 5,395
|
py
|
Python
|
v1.0.0.test/toontown/racing/DistributedRacePadAI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-01T15:46:43.000Z
|
2021-07-23T16:26:48.000Z
|
v1.0.0.test/toontown/racing/DistributedRacePadAI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 1
|
2019-06-29T03:40:05.000Z
|
2021-06-13T01:15:16.000Z
|
v1.0.0.test/toontown/racing/DistributedRacePadAI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-28T21:18:46.000Z
|
2021-02-25T06:37:25.000Z
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import globalClockDelta
from direct.fsm.FSM import FSM
from toontown.racing import RaceGlobals
from toontown.racing.DistributedKartPadAI import DistributedKartPadAI
from toontown.racing.KartShopGlobals import KartGlobals
from toontown.toonbase import ToontownGlobals
class DistributedRacePadAI(DistributedKartPadAI, FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedRacePadAI')
    defaultTransitions = {
        'Off': ['WaitEmpty'],
        'WaitEmpty': ['WaitCountdown', 'Off'],
        'WaitCountdown': ['WaitEmpty', 'WaitBoarding', 'Off', 'AllAboard'],
        'WaitBoarding': ['AllAboard', 'WaitEmpty', 'Off'],
        'AllAboard': ['Off', 'WaitEmpty', 'WaitCountdown']}
def __init__(self, air):
DistributedKartPadAI.__init__(self, air)
FSM.__init__(self, 'DistributedRacePadAI')
self.genre = 'urban'
self.state = 'Off'
self.trackInfo = [0, 0]
self.laps = 3
self.avIds = []
def enterAllAboard(self):
taskMgr.doMethodLater(KartGlobals.ENTER_RACE_TIME, self.enterRace, self.uniqueName('enterRaceTask'))
def exitAllAboard(self):
self.avIds = []
def considerAllAboard(self, task=None):
for block in self.startingBlocks:
if block.currentMovie:
if not self.state == 'WaitBoarding':
self.request('WaitBoarding')
return
if self.trackInfo[1] in (RaceGlobals.ToonBattle, RaceGlobals.Circuit):
if len(self.avIds) < 2:
for block in self.startingBlocks:
if block.avId != 0:
block.normalExit()
self.request('WaitEmpty')
return
self.request('AllAboard')
if task:
return task.done
def enterWaitCountdown(self):
taskMgr.doMethodLater(KartGlobals.COUNTDOWN_TIME, self.considerAllAboard, self.uniqueName('countdownTask'))
def exitWaitCountdown(self):
taskMgr.remove(self.uniqueName('countdownTask'))
def enterWaitBoarding(self):
pass
def enterWaitEmpty(self):
taskMgr.doMethodLater(RaceGlobals.TrackSignDuration, self.changeTrack, self.uniqueName('changeTrack'))
def exitWaitEmpty(self):
taskMgr.remove(self.uniqueName('changeTrack'))
def changeTrack(self, task):
trackInfo = RaceGlobals.getNextRaceInfo(self.trackInfo[0], self.genre, self.index)
trackId, raceType = trackInfo[0], trackInfo[1]
if raceType == RaceGlobals.ToonBattle:
if ToontownGlobals.CIRCUIT_RACING in self.air.holidayManager.currentHolidays or ToontownGlobals.CIRCUIT_RACING_EVENT in self.air.holidayManager.currentHolidays or ToontownGlobals.SILLY_SATURDAY_CIRCUIT in self.air.holidayManager.currentHolidays:
raceType = RaceGlobals.Circuit
self.setTrackInfo([trackId, raceType])
self.laps = trackInfo[2]
self.sendUpdate('setTrackInfo', [self.trackInfo])
return task.again
def enterRace(self, task):
trackId, raceType = self.trackInfo
circuitLoop = []
if raceType == RaceGlobals.Circuit:
circuitLoop = RaceGlobals.getCircuitLoop(trackId)
raceZone = self.air.raceMgr.createRace(trackId, raceType, self.laps, self.avIds, circuitLoop=circuitLoop[1:], circuitPoints={}, circuitTimes={})
for block in self.startingBlocks:
self.sendUpdateToAvatarId(block.avId, 'setRaceZone', [raceZone])
block.raceExit()
return task.done
def addAvBlock(self, avId, startingBlock, paid):
av = self.air.doId2do.get(avId)
if not av:
return
if not av.hasKart():
return KartGlobals.ERROR_CODE.eNoKart
if self.state == 'Off':
return KartGlobals.ERROR_CODE.eTrackClosed
if self.state in ('AllAboard', 'WaitBoarding'):
return KartGlobals.ERROR_CODE.eBoardOver
if startingBlock.avId != 0:
return KartGlobals.ERROR_CODE.eOcuppied
if RaceGlobals.getEntryFee(self.trackInfo[0], self.trackInfo[1]) > av.getTickets():
return KartGlobals.ERROR_CODE.eTickets
self.avIds.append(avId)
if not self.state == 'WaitCountdown':
self.request('WaitCountdown')
return KartGlobals.ERROR_CODE.success
def removeAvBlock(self, avId, startingBlock):
if avId in self.avIds:
self.avIds.remove(avId)
def kartMovieDone(self):
if len(self.avIds) == 0 and not self.state == 'WaitEmpty':
self.request('WaitEmpty')
if self.state == 'WaitBoarding':
self.considerAllAboard()
def getState(self):
        return (self.state, globalClockDelta.getRealNetworkTime())
def getTrackInfo(self):
return self.trackInfo
def request(self, state):
FSM.request(self, state)
self.state = state
self.sendUpdate('setState', [state, globalClockDelta.getRealNetworkTime()])
def setRaceZone(self, todo0):
pass
def setTrackInfo(self, trackInfo):
        self.trackInfo = [trackInfo[0], trackInfo[1]]
| 38.262411
| 257
| 0.637998
|
1b554d76e467f1a248fb7572ebaa530ce5d1fed8
| 5,824
|
py
|
Python
|
tests/integration_time_test.py
|
uncbiag/neuro_shooting
|
ebf80a9c0a4515fdfffaa0c5418dc866e2776495
|
[
"Apache-2.0"
] | 6
|
2020-12-10T19:03:08.000Z
|
2021-09-30T09:05:17.000Z
|
tests/integration_time_test.py
|
uncbiag/neuro_shooting
|
ebf80a9c0a4515fdfffaa0c5418dc866e2776495
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_time_test.py
|
uncbiag/neuro_shooting
|
ebf80a9c0a4515fdfffaa0c5418dc866e2776495
|
[
"Apache-2.0"
] | 1
|
2020-12-13T08:46:13.000Z
|
2020-12-13T08:46:13.000Z
|
# The goal of this script is to determine whether integration with multiple time-steps works as intended.
import numpy as np
import random
seed = 1234
print('Setting the random seed to {:}'.format(seed))
random.seed(seed)
np.random.seed(seed)
import torch
torch.manual_seed(seed)
import neuro_shooting.shooting_blocks as shooting_blocks
import neuro_shooting.shooting_models as shooting_models
import neuro_shooting.generic_integrator as generic_integrator
def zero_grads(pars):
r"""Clears the gradients of all optimized :class:`torch.Tensor` s."""
for p in pars:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
# particle setup
nonlinearity = 'tanh'
nr_of_particles = 10
parameter_weight = 0.1
# create a simple integrator
stepsize = 0.1
integrator_options = {'step_size': stepsize}
in_features_size = 2
#check_models = ['updown']
#check_models = ['DEBUG']
check_models = ['simple']
#check_models = ['universal']
#check_models = ['updown','DEBUG','simple',"universal"]
number_of_tests_passed = 0
number_of_tests_attempted = 0
tolerance = 5e-3
integrator = generic_integrator.GenericIntegrator(integrator_library = 'odeint', integrator_name = 'rk4',
use_adjoint_integration=False,
integrator_options=integrator_options)
for current_model in check_models:
if current_model=='simple':
shooting_model = shooting_models.AutoShootingIntegrandModelSimple(in_features=in_features_size,
nonlinearity=nonlinearity,
nr_of_particles=nr_of_particles,
particle_dimension=1,
particle_size=in_features_size,
parameter_weight=parameter_weight)
elif current_model == 'universal':
shooting_model = shooting_models.AutoShootingIntegrandModelUniversal(in_features=in_features_size,
nonlinearity=nonlinearity,
nr_of_particles=nr_of_particles,
particle_dimension=1,
particle_size=in_features_size,
parameter_weight=parameter_weight,
inflation_factor=5)
elif current_model=='updown':
shooting_model = shooting_models.AutoShootingIntegrandModelUpDown(in_features=in_features_size, nonlinearity=nonlinearity,
nr_of_particles=nr_of_particles,particle_dimension = 1,
particle_size = in_features_size,
parameter_weight=parameter_weight,
inflation_factor=5)
elif current_model == 'DEBUG':
shooting_model = shooting_models.DEBUGAutoShootingIntegrandModelSimple(in_features=in_features_size,
nonlinearity=nonlinearity,
nr_of_particles=nr_of_particles,
particle_dimension=1,
particle_size=in_features_size,
parameter_weight=parameter_weight)
else:
raise ValueError('Unknown model to check: {}'.format( current_model ))
use_analytic_solution = True
shooting_block = shooting_blocks.ShootingBlockBase(name='test', shooting_integrand=shooting_model, integrator_options=integrator_options)
shooting_model.use_analytic_solution = use_analytic_solution
print('\n\nChecking model: {}'.format(current_model))
print('-------------------------------------\n')
# create some sample data
sample_data = torch.randn([1,1,in_features_size])
# run through the shooting block once to get the necessary parameters
shooting_block(x=sample_data)
# create overall time-vector, for simplicity make sure it corresponds to the step-size
t_np = np.array(range(0,16))*stepsize
t = torch.from_numpy(t_np)
# first let's try to integrate this all at once
shooting_block.set_integration_time_vector(integration_time_vector=t, suppress_warning=True)
pred,_,_,_ = shooting_block(x=sample_data)
# now integrate it step by step
pred_step_by_step = torch.zeros_like(pred)
pred_step_by_step[0,...] = sample_data
for ind,ct in enumerate(t[1:]):
shooting_block.set_integration_time(ct)
cpred, _, _, _ = shooting_block(x=sample_data)
pred_step_by_step[ind+1,...] = cpred
print('Pred = {}\n'.format(pred[:,0,0,:]))
print('Pred_step_by_step = {}\n'.format(pred_step_by_step[:,0,0,:]))
print('Pred-pred_step_by_step = {}\n'.format(pred[:,0,0,:]-pred_step_by_step[:,0,0,:]))
print('diff(pred) = {}\n'.format(pred[1:,0,0,:]-pred[:-1,0,0,:]))
print('diff(pred_step_by_step) = {}\n'.format(pred_step_by_step[1:,0,0,:]-pred_step_by_step[:-1,0,0,:]))
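    # Hedged addition (not part of the original script): fold the printed
    # comparison into the pass/fail counters and `tolerance` defined above.
    number_of_tests_attempted += 1
    if (pred - pred_step_by_step).abs().max().item() < tolerance:
        number_of_tests_passed += 1
print('{}/{} integration-time checks within tolerance {}'.format(
    number_of_tests_passed, number_of_tests_attempted, tolerance))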
| 46.222222
| 141
| 0.545673
|
d29f45a76ed07c66f147c5c97c8b9bf2621a184f
| 5,847
|
py
|
Python
|
lib/rucio/web/rest/flaskapi/v1/common.py
|
andresailer/rucio
|
5cdc5387ad4fbd9934973311e0b0ecb0ec198c28
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/web/rest/flaskapi/v1/common.py
|
andresailer/rucio
|
5cdc5387ad4fbd9934973311e0b0ecb0ec198c28
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/web/rest/flaskapi/v1/common.py
|
andresailer/rucio
|
5cdc5387ad4fbd9934973311e0b0ecb0ec198c28
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
from __future__ import print_function
import itertools
import re
from functools import wraps
from time import time
from traceback import format_exc
import six
from flask import request, Response
from rucio.api.authentication import validate_auth_token
from rucio.common.exception import RucioException
from rucio.common.schema import get_schema_value
from rucio.common.utils import generate_uuid
from rucio.web.rest.utils import generate_http_error_flask
def request_auth_env():
if request.environ.get('REQUEST_METHOD') == 'OPTIONS':
return '', 200
auth_token = request_header_ensure_string('X-Rucio-Auth-Token')
try:
auth = validate_auth_token(auth_token)
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
if auth is None:
return generate_http_error_flask(401, 'CannotAuthenticate', 'Cannot authenticate with given credentials')
request.environ['vo'] = auth.get('vo', 'def')
request.environ['issuer'] = auth.get('account')
request.environ['identity'] = auth.get('identity')
request.environ['request_id'] = generate_uuid()
request.environ['start_time'] = time()
def response_headers(response):
response.headers['Access-Control-Allow-Origin'] = request.environ.get('HTTP_ORIGIN')
response.headers['Access-Control-Allow-Headers'] = request.environ.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS')
response.headers['Access-Control-Allow-Methods'] = '*'
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.environ.get('REQUEST_METHOD') == 'GET':
response.headers['Cache-Control'] = 'no-cache, no-store, max-age=0, must-revalidate'
response.headers['Cache-Control'] = 'post-check=0, pre-check=0'
response.headers['Pragma'] = 'no-cache'
return response
def check_accept_header_wrapper_flask(supported_content_types):
""" Decorator to check if an endpoint supports the requested content type. """
def wrapper(f):
@wraps(f)
def decorated(*args, **kwargs):
if not request.accept_mimetypes.provided:
# accept anything, if Accept header is not provided
return f(*args, **kwargs)
for supported in supported_content_types:
if supported in request.accept_mimetypes:
return f(*args, **kwargs)
# none matched..
return generate_http_error_flask(406, 'UnsupportedRequestedContentType', 'The requested content type %s is not supported. Use %s.' % (request.environ.get("HTTP_ACCEPT"), str(supported_content_types)))
return decorated
return wrapper
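@check_accept_header_wrapper_flask(['application/x-json-stream'])
def _accept_header_usage_sketch():
    # Hedged usage sketch (not part of the original module): the wrapped view
    # only runs when the client's Accept header admits x-json-stream; otherwise
    # the decorator above answers with a 406 UnsupportedRequestedContentType.
    return '', 200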
def parse_scope_name(scope_name):
"""
Parses the given scope_name according to the schema's
SCOPE_NAME_REGEXP and returns a (scope, name) tuple.
:param scope_name: the scope_name string to be parsed.
:raises ValueError: when scope_name could not be parsed.
:returns: a (scope, name) tuple.
"""
# why again does that regex start with a slash?
scope_name = re.match(get_schema_value('SCOPE_NAME_REGEXP'), '/' + scope_name)
if scope_name is None:
raise ValueError('cannot parse scope and name')
return scope_name.group(1, 2)
def try_stream(generator, content_type=None):
"""
Peeks at the first element of the passed generator and raises
an error, if yielding raises. Otherwise returns
a flask.Response object.
:param generator: a generator function or an iterator.
:param content_type: the response's Content-Type.
'application/x-json-stream' by default.
:returns: a flask.Response object with the specified Content-Type.
"""
if not content_type:
content_type = 'application/x-json-stream'
it = iter(generator)
try:
peek = next(it)
return Response(itertools.chain((peek,), it), content_type=content_type)
except StopIteration:
return Response('', content_type=content_type)
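def _try_stream_usage_sketch(rows):
    # Hedged usage sketch (not part of the original module): stream `rows` as
    # newline-delimited JSON. Because try_stream() consumes the first element
    # eagerly, an error raised while rendering the first row propagates here,
    # before any part of the response has been sent to the client.
    from json import dumps
    return try_stream(dumps(row) + '\n' for row in rows)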
def request_header_ensure_string(key, default=None):
"""
Supplement for request.headers.get(...), which returns
unicode strings for Python 2.
:param key: the header name (case-insensitive).
:param default: the value to return, if the header is absent.
Returns None by default.
:raises TypeError: when the header value was not of binary type.
:returns: default, if the key is not present or a str type
corresponding to the header's value.
"""
hdrval = request.headers.get(key, default=default, as_bytes=True)
if hdrval is None or hdrval == default:
return hdrval
elif isinstance(hdrval, six.binary_type):
return six.ensure_str(hdrval)
else:
raise TypeError("Unexpected header value type: " + str(type(hdrval)))
| 37.480769
| 212
| 0.703096
|
aef85e465391b069f10e48cdbcebf0192a401e71
| 1,817
|
py
|
Python
|
tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import patch
from openvino.tools.mo.front.caffe.proposal_ext import ProposalFrontExtractor
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.ops.op import Op
from unit_tests.utils.extractors import FakeMultiParam
from unit_tests.utils.graph import FakeNode
class FakeProposalProtoLayer:
def __init__(self, val):
self.proposal_param = val
class TestProposalExt(unittest.TestCase):
@classmethod
def setUpClass(cls):
Op.registered_ops['Proposal'] = ProposalOp
def test_proposal_no_pb_no_ml(self):
self.assertRaises(AttributeError, ProposalFrontExtractor.extract, None)
@patch('openvino.tools.mo.front.caffe.proposal_ext.merge_attrs')
def test_proposal_ext_ideal_numbers(self, merge_attrs):
params = {
'feat_stride': 1,
'base_size': 16,
'min_size': 16,
'ratio': 1,
'scale': 2,
'pre_nms_topn': 6000,
'post_nms_topn': 300,
'nms_thresh': 0.7
}
merge_attrs.return_value = {
**params
}
fake_pl = FakeProposalProtoLayer(FakeMultiParam(params))
fake_node = FakeNode(fake_pl, None)
ProposalFrontExtractor.extract(fake_node)
exp_res = {
'type': "Proposal",
'feat_stride': 1,
'base_size': 16,
'min_size': 16,
'ratio': 1,
'scale': 2,
'pre_nms_topn': 6000,
'post_nms_topn': 300,
'nms_thresh': 0.7,
'infer': ProposalOp.proposal_infer
}
for key in exp_res.keys():
self.assertEqual(fake_node[key], exp_res[key])
| 28.84127
| 79
| 0.621904
|
871c9c77a0110f3fc64bc305a23de26e68c901f0
| 1,368
|
py
|
Python
|
sources/migrations/versions/919a41af1b56_.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | 1
|
2021-06-23T20:19:45.000Z
|
2021-06-23T20:19:45.000Z
|
sources/migrations/versions/919a41af1b56_.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | null | null | null |
sources/migrations/versions/919a41af1b56_.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 919a41af1b56
Revises: afe05e78ec30
Create Date: 2020-07-25 22:41:48.553542
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '919a41af1b56'
down_revision = 'afe05e78ec30'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('transistor_array_mosfet',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('number_of_channels', sa.String(length=30), nullable=True),
sa.Column('rds_on', sa.String(length=30), nullable=True),
sa.Column('vgs_max', sa.String(length=30), nullable=True),
sa.Column('vgs_th', sa.String(length=30), nullable=True),
sa.Column('vds_max', sa.String(length=30), nullable=True),
sa.Column('ids_max', sa.String(length=30), nullable=True),
sa.Column('current_total_max', sa.String(length=30), nullable=True),
sa.Column('power_max', sa.String(length=30), nullable=True),
sa.Column('channel_type', sa.String(length=30), nullable=True),
sa.ForeignKeyConstraint(['id'], ['component.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('transistor_array_mosfet')
# ### end Alembic commands ###
| 32.571429
| 73
| 0.692982
|
6b2dc21129e39211430e06f4b27eabfcb61003aa
| 55
|
py
|
Python
|
src/modules/bidding/domain/events.py
|
pgorecki/python-ddd
|
0073ccce35c651be263f5d7d3d63f9a49bc0b78a
|
[
"MIT"
] | 10
|
2022-03-16T19:26:51.000Z
|
2022-03-31T23:50:51.000Z
|
src/modules/bidding/domain/events.py
|
pgorecki/python-ddd
|
0073ccce35c651be263f5d7d3d63f9a49bc0b78a
|
[
"MIT"
] | null | null | null |
src/modules/bidding/domain/events.py
|
pgorecki/python-ddd
|
0073ccce35c651be263f5d7d3d63f9a49bc0b78a
|
[
"MIT"
] | 2
|
2022-03-16T19:26:54.000Z
|
2022-03-27T13:21:02.000Z
|
class PlacedBidIsGreaterThanCurrentWinningBid:
...
| 18.333333
| 46
| 0.8
|
7b2f5cb56622a02df561946066434e466e9f5882
| 10,793
|
py
|
Python
|
cifar/compute_if.py
|
xszheng2020/memorization
|
6270df8db388922fc35d6cd7b23112e74fbbe1f6
|
[
"Apache-2.0"
] | 4
|
2022-03-16T12:05:47.000Z
|
2022-03-28T12:21:36.000Z
|
cifar/compute_if.py
|
xszheng2020/memorization
|
6270df8db388922fc35d6cd7b23112e74fbbe1f6
|
[
"Apache-2.0"
] | null | null | null |
cifar/compute_if.py
|
xszheng2020/memorization
|
6270df8db388922fc35d6cd7b23112e74fbbe1f6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import config
import random
import numpy as np
import pandas as pd
pd.set_option('max_colwidth', 256)
# +
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
# %matplotlib inline
set_matplotlib_formats('svg')
# -
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
# +
import torchvision
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from torchvision import models
# -
from tqdm import tqdm
from sklearn.metrics import classification_report
import pickle
from contexttimer import Timer
from model import CustomModel
from dataset import CustomDataset, CustomDatasetWithMask
def set_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def compute_s(model,
v,
train_data_loader,
damp,
scale,
num_samples):
last_estimate = list(v).copy()
with tqdm(total=num_samples) as pbar:
for i, batch in enumerate(train_data_loader):
####
labels = batch[1].cuda()
inputs = batch[2].cuda()
####
this_estimate = compute_hessian_vector_products(model=model,
vectors=last_estimate,
labels=labels,
inputs=inputs,
)
            # Recursively calculate h_estimate
# https://github.com/dedeswim/pytorch_influence_functions/blob/master/pytorch_influence_functions/influence_functions/hvp_grad.py#L118
with torch.no_grad():
new_estimate = [
a + (1 - damp) * b - c / scale
for a, b, c in zip(v, last_estimate, this_estimate)
]
####
pbar.update(1)
new_estimate_norm = new_estimate[0].norm().item()
last_estimate_norm = last_estimate[0].norm().item()
estimate_norm_diff = new_estimate_norm - last_estimate_norm
pbar.set_description(f"{new_estimate_norm:.2f} | {estimate_norm_diff:.2f}")
####
last_estimate = new_estimate
            if i > num_samples: # should be i >= (num_samples - 1), but it does not matter here
break
# References:
# https://github.com/kohpangwei/influence-release/blob/master/influence/genericNeuralNet.py#L475
# Do this for each iteration of estimation
# Since we use one estimation, we put this at the end
inverse_hvp = [X / scale for X in last_estimate]
return inverse_hvp
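# The loop in compute_s() is a LiSSA-style stochastic estimate of the inverse
# Hessian-vector product H^{-1} v: each pass applies the recurrence
#     h_{t+1} = v + (1 - damp) * h_t - (H h_t) / scale
# whose fixed point is scale * H^{-1} v, hence the final division by `scale`.
# Minimal dense-matrix analogue (illustration only, not used by this script):
#
#     import torch
#     H = torch.tensor([[2.0, 0.0], [0.0, 4.0]])
#     v = torch.tensor([1.0, 1.0])
#     h = v.clone()
#     for _ in range(200):
#         h = v + h - (H @ h) / 10.0   # damp=0, scale=10
#     print(h / 10.0)                  # ~ H^{-1} v = [0.5, 0.25]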
def compute_hessian_vector_products(model,
vectors,
labels,
inputs):
####
outputs = model(inputs)
ce_loss = F.cross_entropy(outputs, labels)
####
hack_loss = torch.cat([
(p**2).view(-1)
for n, p in model.named_parameters()
if ((not any(nd in n for nd in no_decay)) and (p.requires_grad==True))
]).sum() * (opt.L2_LAMBDA)
####
loss = ce_loss + hack_loss
####
model.zero_grad()
grad_tuple = torch.autograd.grad(
outputs=loss,
inputs=[param for name, param in model.named_parameters()
if param.requires_grad],
create_graph=True)
####
# model.zero_grad()
grad_grad_tuple = torch.autograd.grad(
outputs=grad_tuple,
inputs=[param for name, param in model.named_parameters()
if param.requires_grad],
grad_outputs=vectors,
only_inputs=True
)
return grad_grad_tuple
# +
if __name__ == '__main__':
opt = config.parse_opt_if()
print(opt)
####
set_seeds(opt.SEED)
####
input_size = 224
transform_test = transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
####
print(os.path.join(opt.DATA_PATH, opt.ORDER, '{}.csv'.format(opt.PERCENTAGE)))
train = pd.read_csv(os.path.join(opt.DATA_PATH, opt.ORDER, '{}.csv'.format(opt.PERCENTAGE)))
print(train.info())
train_dev = pd.read_csv(os.path.join(opt.DATA_PATH, opt.TRAIN_DEV_DATA))
dev = pd.read_csv(os.path.join(opt.DATA_PATH, opt.DEV_DATA))
####
train_dataset = CustomDataset(train, root='./data/cifar_10/train/')
####
with open(os.path.join(opt.DATA_PATH, opt.ATTR_ORDER, '{}.pkl'.format(opt.ATTR_PERCENTAGE)), "rb") as handle:
mask_list = pickle.load(handle)
print(mask_list[0:5])
train_dev_dataset = CustomDatasetWithMask(data=train_dev, root='./data/cifar_10/train/', mask=mask_list)
train_dev_dataloader = DataLoader(train_dev_dataset, batch_size=1,
shuffle=False,
# pin_memory=True,
num_workers=0)
####
dev_dataset = CustomDataset(dev, root='./data/cifar_10/train/')
dev_dataloader = DataLoader(dev_dataset, batch_size=opt.TEST_BATCH_SIZE,
shuffle=False,
# pin_memory=True,
num_workers=0)
####
model = CustomModel(opt)
for name, param in model.named_parameters():
print(name)
print(param.requires_grad)
model.cuda()
filename = os.path.join(opt.OUTPUT, opt.ORDER, str(opt.PERCENTAGE), str(opt.CHECKPOINT), 'checkpoint/{}.pth'.format(opt.EPOCH-1))
model.load_state_dict(
torch.load(filename)
)
no_decay = ['bias', 'LayerNorm.weight']
model.cuda()
model.eval()
####
# predictions_list = []
# for idx, batch in enumerate(tqdm(dev_dataloader)):
# labels = batch[1].cuda()
# inputs = batch[2].cuda()
# with torch.no_grad():
# outputs = model(inputs)
# predictions = outputs.detach().cpu().numpy()
# predictions_list.append(predictions)
# predictions = np.vstack(predictions_list)
# predictions = np.argmax(predictions, axis=1)
# dev['prediction'] = predictions
# print(classification_report(dev['label'], dev['prediction'], digits=4))
####
start = opt.START
length = opt.LENGTH
print(start, length)
output_collections = []
####
for idx, batch in enumerate(train_dev_dataloader):
####
if idx < start:
continue
if idx >= start+length:
break
####
z_index = batch[0]
z_labels = batch[1].cuda()
z_inputs = batch[2].cuda()
z_mask = batch[3].cuda()
####
outputs = model(z_inputs)
####
prob = F.softmax(outputs, dim=-1)
prediction = torch.argmax(prob, dim=1)
# if prediction==z_labels:
# continue
prob_gt = torch.gather(prob, 1, z_labels.unsqueeze(1))
# print(prob_gt)
####
model.zero_grad()
v = torch.autograd.grad(outputs=prob_gt,
inputs=[param for name, param in model.named_parameters()
if param.requires_grad],
create_graph=False)
####
for repetition in range(4): # here we do not repeat
with Timer() as timer:
####
train_dataloader = DataLoader(train_dataset,
batch_size=1,
shuffle=True,
# pin_memory=True,
num_workers=0)
####
s = compute_s(model=model,
v=v,
train_data_loader=train_dataloader,
damp=opt.DAMP,
scale=opt.SCALE,
num_samples=opt.NUM_SAMPLES)
####
time_elapsed = timer.elapsed
# print(f"{time_elapsed:.2f} seconds")
####
####
z_inputs_masked = z_inputs.clone() # 1, 2048, 7, 7
z_mask = z_mask.unsqueeze(1).expand(-1, 2048, -1, -1) # 1, 2048, 7, 7
z_inputs_masked.masked_fill_(z_mask==1, 0.0)
####
outputs = model(z_inputs_masked)
####
ce_loss_gt = F.cross_entropy(outputs, z_labels)
z_hack_loss = torch.cat([
(p**2).view(-1)
for n, p in model.named_parameters()
if ((not any(nd in n for nd in no_decay)) and (p.requires_grad==True))
]).sum() * (opt.L2_LAMBDA)
####
model.zero_grad()
grad_tuple_ = torch.autograd.grad(outputs=ce_loss_gt+z_hack_loss,
inputs=[param for name, param in model.named_parameters()
if param.requires_grad],
create_graph=True)
####
influence = [-torch.sum(x * y) for x, y in zip(s, grad_tuple_)]
influence = sum(influence).item()
####
outputs = {
"index": z_index.detach().cpu().numpy()[0],
"label": z_labels.detach().cpu().numpy()[0],
"prob": prob.detach().cpu().numpy()[0],
"prediction": prediction.detach().cpu().numpy()[0],
"influence": influence,
"repetition": repetition,
"time_elapsed": time_elapsed,
}
print(idx)
print(outputs['index'])
print(outputs['label'], outputs['prob'], outputs['prediction'])
print('influence: ', outputs['influence'])
output_collections.append(outputs)
####
break
# +
filename = os.path.join(opt.OUTPUT, 'eval_attr/{}/{}/{}.pkl'.format(opt.ATTR_ORDER, opt.ATTR_PERCENTAGE, start))
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "wb") as handle:
pickle.dump(output_collections, handle)
# -
| 33.209231 | 146 | 0.527008 |
b95c2d9171a1c770e3e273a4dd4a386646d92d4d | 392 | py | Python | rest_api/schemas/errors.py | parisbs/python-rest-api | 97ef302f1613545d8c411b65b55cf7c9c8f5a95d | ["MIT"] | null | null | null | rest_api/schemas/errors.py | parisbs/python-rest-api | 97ef302f1613545d8c411b65b55cf7c9c8f5a95d | ["MIT"] | 2 | 2021-04-30T20:52:42.000Z | 2021-06-02T00:24:27.000Z | rest_api/schemas/errors.py | parisbs/python-rest-api | 97ef302f1613545d8c411b65b55cf7c9c8f5a95d | ["MIT"] | null | null | null |
from flask_restplus import fields
common_error_schema = {
'message': fields.String(
title='Message',
description='Error message',
example='Invalid request',
readOnly=True
),
'extra_data': fields.String(
title='Extra data',
description='Extra data fields',
example='extra1: some, extra2: 10',
readOnly=True
),
}
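# Usage sketch (names below are illustrative, not part of this module): the dict above
# holds raw field definitions; flask-restplus expects them to be registered as a model
# before use with @api.marshal_with / @api.response, e.g.
#
#     from flask_restplus import Api
#     api = Api()
#     common_error_model = api.model('CommonError', common_error_schema)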
| 23.058824 | 43 | 0.59949 |
a90f8087323dafd32c7a135a3bef2fa714386432 | 8,598 | py | Python | zazi/apps/loan_ledger/utils/financial_statements.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | ["Apache-2.0"] | null | null | null | zazi/apps/loan_ledger/utils/financial_statements.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | ["Apache-2.0"] | 1 | 2021-08-20T06:41:57.000Z | 2021-08-20T06:41:57.000Z | zazi/apps/loan_ledger/utils/financial_statements.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | ["Apache-2.0"] | null | null | null |
from collections import namedtuple
from dateutil.relativedelta import relativedelta
from decimal import Decimal as D
from django.conf import settings
from django.db import transaction as db_transaction, models
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from zazi.apps.banking.models import BankAccount
from ..models import \
LoanTransactionEntry, LoanLedgerAccount, \
OwnersCapitalEntry
from ..enums import LoanLedgerAccountType
#--------------
import logging
logger = logging.getLogger(__name__)
#--------------
def humanize(amount):
if amount >= 0:
return f"{amount:,.2f}"
return f"({abs(amount):,.2f})"
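# Worked example (illustration only), using the Decimal alias D imported above:
#     humanize(D("1234.5"))   -> "1,234.50"
#     humanize(D("-1234.5"))  -> "(1,234.50)"   # negatives in accounting-style parentheses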
class LedgerAccount(dict): pass
def get_balance_sheet(loan_ledger_balance):
bank_balance = sum([b.current_balance for b in BankAccount.objects.all()])
def get_assets():
def get_cash_at_bank():
# TODO: Fix this hack... Think about how the money
# moves in and out the loan ledger
if settings.DEBUG:
_bank_balance = bank_balance + (
loan_ledger_balance.loan_fund_source_balance +
loan_ledger_balance.loan_fund_source_balance_bf)
else:
_bank_balance = bank_balance
return (_bank_balance, LedgerAccount(
name="Cash at Bank",
balance=humanize(_bank_balance),
children=[]))
def get_interest_earned():
balance = (
loan_ledger_balance.interest_receivable_balance +
loan_ledger_balance.interest_receivable_balance_bf )
if balance < 0:
interest_accrued = 0
interest_paid = balance
else:
interest_accrued = balance
interest_paid = 0
return balance, LedgerAccount(
name='Interest Earned',
balance=humanize(balance),
children=[
LedgerAccount(
name='Interest Accrued',
balance=humanize(interest_accrued),
children=[
LedgerAccount(
name='Interest Paid',
balance=humanize(interest_paid),
children=[])])])
def get_loan_portfolio():
balance = (
loan_ledger_balance.loan_portfolio_balance +
loan_ledger_balance.loan_portfolio_balance_bf)
if balance < 0:
loan_principal_due = 0
loan_principal_paid = balance
else:
loan_principal_due = balance
loan_principal_paid = 0
return balance, LedgerAccount(
name='Loan Portfolio',
balance=humanize(balance),
children=[
LedgerAccount(
name='Loan Principal Due',
balance=humanize(loan_principal_due),
children=[
LedgerAccount(
name='Loan Principal Paid',
balance=humanize(loan_principal_paid),
children=[])])])
# --------
(cash_at_bank, cash_at_bank_items) = get_cash_at_bank()
(loan_portfolio, loan_portfolio_items) = get_loan_portfolio()
(interest_earned, interest_earned_items) = get_interest_earned()
assets_total = (cash_at_bank + loan_portfolio + interest_earned)
return (
assets_total, [
cash_at_bank_items,
loan_portfolio_items,
interest_earned_items,
])
def get_equity():
def get_owners_equity():
aggregate = OwnersCapitalEntry.objects\
.all()\
.values('amount')\
.aggregate(balance=models.Sum('amount'))
entry_balance = aggregate.get('balance')
total_owners_equity = entry_balance
return total_owners_equity, LedgerAccount(
name="Owner's Equity",
balance=humanize(total_owners_equity),
children=[]
)
def get_expected_earnings():
def get_write_off_expense_balance():
return (
loan_ledger_balance.principal_write_off_expense_balance_bf +
loan_ledger_balance.principal_write_off_expense_balance +
loan_ledger_balance.interest_write_off_expense_balance_bf +
loan_ledger_balance.interest_write_off_expense_balance +
loan_ledger_balance.penalties_write_off_expense_balance_bf +
loan_ledger_balance.penalties_write_off_expense_balance +
loan_ledger_balance.fees_write_off_expense_balance_bf +
loan_ledger_balance.fees_write_off_expense_balance)
def get_earned_revenue_balance():
return (
loan_ledger_balance.interest_revenue_balance_bf +
loan_ledger_balance.interest_revenue_balance)
def get_fees_expense():
return (
loan_ledger_balance.fees_revenue_balance_bf +
loan_ledger_balance.fees_revenue_balance)
expense = (get_write_off_expense_balance() + get_fees_expense())
expected_earnings = (get_earned_revenue_balance() - expense)
return (expected_earnings, LedgerAccount(
name="Expected Earnings",
balance=humanize(expected_earnings),
children=[]))
#------------------
owners_equity_total, owners_equity_items = get_owners_equity()
expected_earnings_total, expected_earnings_items = get_expected_earnings()
return (owners_equity_total + expected_earnings_total), [
owners_equity_items,
expected_earnings_items,
LedgerAccount(
name="Retained Earnings",
balance=D('0.00'),
children=[]
)
]
def get_liabilities():
liabilities_total = (
loan_ledger_balance.loan_liabilities_balance_bf +
loan_ledger_balance.loan_liabilities_balance
)
return liabilities_total, [
LedgerAccount(
name='Loan Overpayments',
balance=humanize(liabilities_total),
children=[]
)
]
#--------------------
assets_total, assets_items = get_assets()
equity_total, equity_items = get_equity()
liabilities_total, liability_items = get_liabilities()
accounting_equation = (assets_total - (liabilities_total + equity_total))
return {
'assets': assets_items,
'liabilities': liability_items,
'equity': equity_items,
'accounting_equation': humanize(accounting_equation)
}
def get_income_statement(loan_ledger_balance):
return {
'revenue': [
LedgerAccount(
name="Interest Revenue",
balance=humanize(
loan_ledger_balance.interest_revenue_balance_bf +
loan_ledger_balance.interest_revenue_balance
),
children=[
]
)
],
'expense': {
'Principal Write-off Expense': humanize(
loan_ledger_balance.principal_write_off_expense_balance_bf +
loan_ledger_balance.principal_write_off_expense_balance
),
'Interest Write-off Expense': humanize(
loan_ledger_balance.interest_write_off_expense_balance_bf +
loan_ledger_balance.interest_write_off_expense_balance
),
'Fees Write-off Expense': humanize(
loan_ledger_balance.penalties_write_off_expense_balance_bf +
loan_ledger_balance.penalties_write_off_expense_balance +
loan_ledger_balance.fees_write_off_expense_balance_bf +
loan_ledger_balance.fees_write_off_expense_balance
),
'Mpesa Transaction Expense': humanize(
loan_ledger_balance.fees_revenue_balance_bf +
loan_ledger_balance.fees_revenue_balance
)}}
| 36.587234 | 82 | 0.57211 |
a61d9eeff4445bbb04e1f3e38f191f1f9f10720f | 1,939 | py | Python | sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_shared/_generated/v7_0/aio/_configuration_async.py | kushan2018/azure-sdk-for-python | 08a9296207281f4e90e23cf7a30173863accc867 | ["MIT"] | null | null | null | sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_shared/_generated/v7_0/aio/_configuration_async.py | kushan2018/azure-sdk-for-python | 08a9296207281f4e90e23cf7a30173863accc867 | ["MIT"] | 1 | 2019-06-04T18:12:16.000Z | 2019-06-04T18:12:16.000Z | sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_shared/_generated/v7_0/aio/_configuration_async.py | kushan2018/azure-sdk-for-python | 08a9296207281f4e90e23cf7a30173863accc867 | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.core.configuration import Configuration, ConnectionConfiguration
from azure.core.pipeline import policies
from ..version import VERSION
class KeyVaultClientConfiguration(Configuration):
"""Configuration for KeyVaultClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
"""
def __init__(self, credentials, **kwargs):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
super(KeyVaultClientConfiguration, self).__init__(**kwargs)
self._configure(**kwargs)
self.user_agent_policy.add_user_agent('azsdk-python-azure-keyvault/{}'.format(VERSION))
self.generate_client_request_id = True
self.credentials = credentials
def _configure(self, **kwargs):
self.connection = ConnectionConfiguration(**kwargs)
self.user_agent_policy = policies.UserAgentPolicy(**kwargs)
self.headers_policy = policies.HeadersPolicy(**kwargs)
self.proxy_policy = policies.ProxyPolicy(**kwargs)
self.logging_policy = policies.NetworkTraceLoggingPolicy(**kwargs)
self.retry_policy = policies.AsyncRetryPolicy(**kwargs)
self.redirect_policy = policies.AsyncRedirectPolicy(**kwargs)
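# Rough usage sketch (assumption for illustration; the generated KeyVaultClient
# normally builds this configuration itself from the credentials it is given):
#
#     credentials = ...  # any msrestazure credentials object
#     config = KeyVaultClientConfiguration(credentials)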
| 40.395833 | 95 | 0.680763 |
9e8c51f8ec76018811dc62a5379f9806a4f37444 | 37,850 | py | Python | pyNastran/op2/tables/oes_stressStrain/oes_nonlinear.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | ["BSD-3-Clause"] | null | null | null | pyNastran/op2/tables/oes_stressStrain/oes_nonlinear.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | ["BSD-3-Clause"] | 1 | 2021-06-07T16:33:59.000Z | 2021-06-07T16:33:59.000Z | pyNastran/op2/tables/oes_stressStrain/oes_nonlinear.py | daptablade/pyNastran | 247d4ae7a1a3f6a25cc1c3f3c286d6b32f1f84e7 | ["BSD-3-Clause"] | 1 | 2021-12-17T10:45:08.000Z | 2021-12-17T10:45:08.000Z |
"""
defines:
- RealNonlinearPlateArray
"""
from math import isnan
from itertools import count, cycle
from typing import List
import numpy as np
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.result_objects.op2_objects import get_times_dtype
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import OES_Object
from pyNastran.f06.f06_formatting import _eigenvalue_header, write_float_11e, write_float_13e
class RealNonlinearPlateArray(OES_Object):
"""tested by elements/loadstep_elements.op2"""
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
self.nnodes = None
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
@property
def nnodes_per_element(self) -> int:
if self.element_type == 88: # Tria3-nonlinear
nnodes_per_element = 1
elif self.element_type == 90: # Quad4-nonlinear
nnodes_per_element = 1
#elif self.element_type == 144:
#nnodes_per_element = 5
#elif self.element_type == 64: # CQUAD8
#nnodes_per_element = 5
#elif self.element_type == 82: # CQUADR
#nnodes_per_element = 5
#elif self.element_type == 70: # CTRIAR
#nnodes_per_element = 4
#elif self.element_type == 75: # CTRIA6
#nnodes_per_element = 4
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return nnodes_per_element
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
@property
def is_stress(self):
return True
def get_headers(self) -> List[str]:
headers = [
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
'fiber_distance', 'oxx', 'oyy', 'ozz', 'txy',
'eff_plastic_strain', 'eff_plastic_strain', 'eff_creep_strain',
'exx', 'eyy', 'ezz', 'exy',
]
return headers
#def is_bilinear(self):
#if self.element_type in [33, 74]: # CQUAD4, CTRIA3
#return False
#elif self.element_type in [144, 64, 82, 70, 75]: # CQUAD4
#return True
#else:
#raise NotImplementedError('name=%s type=%s' % (self.element_name, self.element_type))
def build(self):
"""sizes the vectorized attributes of the RealNonlinearPlateArray"""
#print("self.ielement = %s" % self.ielement)
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
nnodes_per_element = self.nnodes_per_element
nnodes_per_element = 1
self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
if self.nelements % self.ntimes != 0:
msg = 'nelements=%s ntimes=%s nelements/ntimes=%s' % (
self.nelements, self.ntimes, self.nelements / float(self.ntimes))
#return
raise RuntimeError(msg)
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = np.zeros(self.ntimes, dtype=dtype)
self.element = np.zeros(self.nelements, dtype=idtype)
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
self.data = np.zeros((self.ntimes, self.ntotal, 12), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
nelements = self.element.shape[0]
nelements2 = self.data.shape[1]
is_two_layers = nelements * 2 == nelements2
headers = self.get_headers()
if is_two_layers:
names = ['ElementID', 'Location', 'Item']
element = np.vstack([self.element, self.element]).T.flatten()
if self.is_fiber_distance:
fiber_distance = ['Top', 'Bottom'] * nelements
else:
fiber_distance = ['Mean', 'Curvature'] * nelements
fd = np.array(fiber_distance, dtype='unicode')
element_fd = [
element,
fd,
]
iheader = 0
else:
names = ['ElementID', 'Item']
element_fd = [self.element]
iheader = 0
if self.nonlinear_factor not in (None, np.nan):
# TODO: this varies depending on ???
# - TestOP2.test_cgap_01
# - TestOP2.test_bdf_op2_other_24
#
#LoadStep 1.0
#ElementID Location Item
#7401 Top fiber_distance -1.200000e+00
# Bottom oxx -1.161999e+04
# Top oyy 1.450191e-01
# Bottom ozz 0.000000e+00
# Top txy 4.668588e-05
# Bottom eff_plastic_strain 1.162006e+04
# Top eff_plastic_strain 0.000000e+00
# Bottom eff_creep_strain 0.000000e+00
# Top exx -1.162003e-02
# Bottom eyy 3.486142e-03
# Top ezz 0.000000e+00
# Bottom exy 1.213833e-10
# Top fiber_distance 1.200000e+00
# Bottom oxx -1.161999e+04
# Top oyy 1.449644e-01
# Bottom ozz 0.000000e+00
# Top txy -4.668589e-05
# Bottom eff_plastic_strain 1.162006e+04
# Top eff_plastic_strain 0.000000e+00
# Bottom eff_creep_strain 0.000000e+00
# Top exx -1.162003e-02
# Bottom eyy 3.486142e-03
# Top ezz 0.000000e+00
# Bottom exy -1.213833e-10
#
#LoadStep 0.25 0.50
#ElementID Item
#1 oxx 1.725106e-05 1.075969e-05
# oyy 1.500000e+06 3.000000e+06
# ozz 0.000000e+00 0.000000e+00
# txy -1.751084e-10 2.152285e-09
# eff_plastic_strain 1.500000e+06 3.000000e+06
#... ... ...
#100 eff_creep_strain 0.000000e+00 0.000000e+00
# exx -2.024292e-06 -4.048583e-06
# eyy 6.747639e-06 1.349528e-05
# ezz 0.000000e+00 0.000000e+00
# exy 0.000000e+00 0.000000e+00
column_names, column_values = self._build_dataframe_transient_header()
#element = np.vstack([self.element, self.element]).T.flatten()
#element = self.element
#data_frame = self._build_pandas_transient_elements(
#column_values, column_names,
#headers, element, self.data[:, :, 1:])
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers[iheader:], element_fd, self.data[:, :, iheader:],
from_tuples=False, from_array=True,
names=names,
)
else:
# option B - nice!
df1 = pd.DataFrame(self.element).T
df1.columns = ['ElementID']
df2 = pd.DataFrame(self.data[0, :, 1:])
df2.columns = headers
data_frame = df1.join(df2)
data_frame = data_frame.reset_index().set_index(['ElementID'])
self.data_frame = data_frame
def add_new_eid_sort1(self, dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
self.element[self.ielement] = eid
self.ielement += 1
self.add_sort1(dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy)
def add_sort1(self, dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
if isnan(fd):
fd = 0.
if isnan(sz):
sz = 0.
if isnan(ez):
ez = 0.
self._times[self.itime] = dt
#if self.ielement == 10:
#print(self.element_node[:10, :])
#raise RuntimeError()
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
assert eid == self.element[self.ielement - 1], 'eid=%s self.element[i-1]=%s' % (eid, self.element[self.ielement - 1])
self.data[self.itime, self.itotal, :] = [fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy]
self.itotal += 1
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
# TODO: this name order is wrong
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
(fiber_distance1, oxx1, oyy1, ozz1, txy1, exx1, eyy1, ezz1, exy1, es1, eps1, ecs1) = t1
(fiber_distance2, oxx2, oyy2, ozz2, txy2, exx2, eyy2, ezz2, exy2, es2, eps2, ecs2) = t2
# vm stress can be NaN for some reason...
if not np.array_equal(t1, t2):
eid_spaces = ' ' * (len(str(eid)))
msg += (
# eid fd ox oy oz txy ex ey ez exy es eps ecs1
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
fiber_distance1, oxx1, oyy1, ozz1, txy1, exx1, eyy1, ezz1, exy1, es1, eps1, ecs1,
eid_spaces,
fiber_distance2, oxx2, oyy2, ozz2, txy2, exx2, eyy2, ezz2, exy2, es2, eps2, ecs2))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.nnodes
ntotal = self.ntotal
nlayers = 2
nelements = self.ntotal // self.nnodes // 2
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msgi = ' type=%s ntimes=%i nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i, table_name=%s\n' % (
self.__class__.__name__, ntimes, nelements, nnodes, nlayers, ntotal, self.table_name_str)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, nelements, nnodes, nlayers, ntotal)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
str(', '.join(headers))))
msg.append(' data.shape=%s\n' % str(self.data.shape))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1,
is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#msg, nnodes, cen = _get_plate_msg(self)
if self.element_type == 88:
msg = [
' N O N L I N E A R S T R E S S E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'
' \n'
' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
]
elif self.element_type == 90:
msg = [
' N O N L I N E A R S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
' \n'
' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
#'0 1 -2.500000E-02 -4.829193E+00 -1.640651E-05 -1.907010E-04 4.829185E+00 0.0 0.0\n'
#' -4.829188E-05 1.448741E-05 -4.958226E-09\n'
#' 2.500000E-02 4.770547E+00 1.493975E-04 1.907012E-04 4.770473E+00 0.0 0.0\n'
#' 4.770502E-05 -1.431015E-05 4.958231E-09\n'
]
else: # pragma: no cover
raise NotImplementedError('element_name=%s self.element_type=%s' % (self.element_name, self.element_type))
#msg = [
#' N O N L I N E A R S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
#' \n'
#' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
#' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
#'0 1 -2.500000E-02 -4.829193E+00 -1.640651E-05 -1.907010E-04 4.829185E+00 0.0 0.0\n'
#' -4.829188E-05 1.448741E-05 -4.958226E-09\n'
#' 2.500000E-02 4.770547E+00 1.493975E-04 1.907012E-04 4.770473E+00 0.0 0.0\n'
#' 4.770502E-05 -1.431015E-05 4.958231E-09\n'
#]
# write the f06
ntimes = self.data.shape[0]
eids = self.element
#cen_word = 'CEN/%i' % nnodes
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
fiber_dist = self.data[itime, :, 0]
oxx = self.data[itime, :, 1]
oyy = self.data[itime, :, 2]
ozz = self.data[itime, :, 3]
txy = self.data[itime, :, 4]
es = self.data[itime, :, 5]
eps = self.data[itime, :, 6]
ecs = self.data[itime, :, 7]
exx = self.data[itime, :, 8]
eyy = self.data[itime, :, 9]
ezz = self.data[itime, :, 10]
exy = self.data[itime, :, 11]
for (i, eid, fdi, oxxi, oyyi, ozzi, txyi, exxi, eyyi, ezzi, exyi, esi, epsi, ecsi) in zip(
cycle([0, 1]), eids, fiber_dist, oxx, oyy, ozz, txy, exx, eyy, ezz, exy, es, eps, ecs):
#[fdi, oxxi, oyyi, txyi, major, minor, ovmi] = write_floats_13e(
#[fdi, oxxi, oyyi, txyi, major, minor, ovmi])
#' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
#' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
#'0 1 -2.500000E-02 -4.829193E+00 -1.640651E-05 -1.907010E-04 4.829185E+00 0.0 0.0\n'
#' -4.829188E-05 1.448741E-05 -4.958226E-09\n'
#' 2.500000E-02 4.770547E+00 1.493975E-04 1.907012E-04 4.770473E+00 0.0 0.0\n'
#' 4.770502E-05 -1.431015E-05 4.958231E-09\n'
if i == 0:
f06_file.write(
'0 %8i %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %s\n' % (
# A
# ELEMENT FIBER XYZ STRESS EQUIVALENT EFF.STRAIN EFF.CREEP\n'
eid, write_float_13e(fdi),
write_float_13e(oxxi), write_float_13e(oyyi),
#write_float_13e(ozzi),
write_float_13e(txyi),
write_float_13e(esi), write_float_13e(epsi),
write_float_13e(ecsi),
write_float_13e(exxi), write_float_13e(eyyi),
#write_float_13e(ezzi),
write_float_13e(exyi),
))
else:
f06_file.write(
' %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %s\n' % (
write_float_13e(fdi),
write_float_13e(oxxi), write_float_13e(oyyi),
#write_float_13e(ozzi),
write_float_13e(txyi),
write_float_13e(esi), write_float_13e(epsi),
write_float_13e(ecsi),
write_float_13e(exxi), write_float_13e(eyyi),
#write_float_13e(ezzi),
write_float_13e(exyi),
)
)
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class RealNonlinearSolidArray(OES_Object):
"""tested by elements/loadstep_elements.op2"""
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
self.nnodes = None
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
@property
def nnodes_per_element(self) -> int:
if self.element_type == 85: # CTETRANL
nnodes_per_element = 5
elif self.element_type == 91: # CPENTANL
nnodes_per_element = 7
elif self.element_type == 93: # CHEXANL
nnodes_per_element = 9
elif self.element_type == 256: # CPYRAMNL
nnodes_per_element = 6
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return nnodes_per_element
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
@property
def is_stress(self):
return True
def get_headers(self) -> List[str]:
headers = [
'oxx', 'oyy', 'ozz', 'txy', 'tyz', 'txz',
'eff_plastic_strain', 'eff_plastic_strain', 'eff_creep_strain',
'exx', 'eyy', 'ezz', 'exy', 'eyz', 'exz',
]
return headers
#def is_bilinear(self):
#if self.element_type in [33, 74]: # CQUAD4, CTRIA3
#return False
#elif self.element_type in [144, 64, 82, 70, 75]: # CQUAD4
#return True
#else:
#raise NotImplementedError('name=%s type=%s' % (self.element_name, self.element_type))
    def build(self):
        """sizes the vectorized attributes of the RealNonlinearSolidArray"""
#print("self.ielement = %s" % self.ielement)
#print(f'ntimes={self.ntimes} nelements={self.nelements} ntotal={self.ntotal} - {self.element_name}')
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
nnodes_per_element = self.nnodes_per_element
#nnodes_per_element = 1
self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
if self.nelements % self.ntimes != 0:
msg = 'nelements=%s ntimes=%s nelements/ntimes=%s' % (
self.nelements, self.ntimes, self.nelements / float(self.ntimes))
raise RuntimeError(msg)
self.nelements //= self.ntimes
assert self.nelements > 0
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = np.zeros(self.ntimes, dtype=dtype)
self.element_node = np.zeros((self.ntotal, 2), dtype='int32')
#[sx, sy, sz, sxy, syz, sxz, se, eps, ecs,
# ex, ey, ez, exy, eyz, exz]
self.data = np.zeros((self.ntimes, self.ntotal, 15), dtype='float32')
#def build_dataframe(self):
#"""creates a pandas dataframe"""
#import pandas as pd
#headers = self.get_headers()[1:]
##nelements = self.element.shape[0]
#if self.nonlinear_factor not in (None, np.nan):
#column_names, column_values = self._build_dataframe_transient_header()
#self.data_frame = pd.Panel(self.data[:, :, 1:], items=column_values, major_axis=self.element, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#self.data_frame.index.names = ['ElementID', 'Item']
#else:
## option B - nice!
#df1 = pd.DataFrame(self.element).T
#df1.columns = ['ElementID']
#df2 = pd.DataFrame(self.data[0, :, 1:])
#df2.columns = headers
#self.data_frame = df1.join(df2)
#self.data_frame = self.data_frame.reset_index().set_index(['ElementID'])
#print(self.data_frame)
def add_sort1(self, dt, eid, grid,
sx, sy, sz, sxy, syz, sxz, se, eps, ecs,
ex, ey, ez, exy, eyz, exz):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#if isnan(fd):
#fd = 0.
#if isnan(sz):
#sz = 0.
#if isnan(ez):
#ez = 0.
self._times[self.itime] = dt
#if self.ielement == 10:
#print(self.element_node[:10, :])
#raise RuntimeError()
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
#assert eid == self.element[self.ielement - 1], 'eid=%s self.element[i-1]=%s' % (eid, self.element[self.ielement - 1])
#print(self.element_node.shape, self.itotal)
self.element_node[self.itotal, :] = [eid, grid]
#a = [sx, sy, sz, sxy, syz, sxz, se, eps, ecs,
#ex, ey, ez, exy, eyz, exz]
self.data[self.itime, self.itotal, :] = [sx, sy, sz, sxy, syz, sxz, se, eps, ecs,
ex, ey, ez, exy, eyz, exz]
self.itotal += 1
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
for itime in range(self.ntimes):
for ie, eid, nid in zip(count(), eids, nids):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
# TODO: this name order is wrong?
#[sx, sy, sz, sxy, syz, sxz, se, eps, ecs,
# ex, ey, ez, exy, eyz, exz]
(sx1, sy1, sz1, sxy1, syz1, sxz1, se1, eps1, ecs1, ex1, ey1, ez1, exy1, eyz1, exz1) = t1
(sx2, sy2, sz2, sxy2, syz2, sxz2, se2, eps2, ecs2, ex2, ey2, ez2, exy2, eyz2, exz2) = t2
if not np.array_equal(t1, t2):
eid_spaces = ' ' * (len(str(eid)))
msg += (
# eid sx, sy, sz,sxy,syz,sxz, se,eps,ecs, ex, ey, ez, exy, eyz, exz
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
sx1, sy1, sz1, sxy1, syz1, sxz1, se1, eps1, ecs1, ex1, ey1, ez1, exy1, eyz1, exz1,
eid_spaces,
sx2, sy2, sz2, sxy2, syz2, sxz2, se2, eps2, ecs2, ex2, ey2, ez2, exy2, eyz2, exz2))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.nnodes
ntotal = self.ntotal
#ntotal = self.ntotal * nnodes
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msgi = ' type=%s ntimes=%i nelements=%i nnodes_per_element=%i ntotal=%i, table_name=%s\n' % (
self.__class__.__name__, ntimes, nelements, nnodes, ntotal, self.table_name_str)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i nnodes_per_element=%i ntotal=%i\n' % (
self.__class__.__name__, nelements, nnodes, ntotal)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
str(', '.join(headers))))
msg.append(' data.shape=%s\n' % str(self.data.shape))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1,
is_mag_phase=False, is_sort1=True): # pragma: no cover
if header is None:
header = []
#raise NotImplementedError('RealNonlinearSolidArray.write_f06')
#msg, nnodes, cen = _get_plate_msg(self)
#if self.element_type == 85:
##etype = 'CTETRANL'
#nnodes_per_element = 5
#elif self.element_type == 91:
##etype = 'CPENTANL'
#nnodes_per_element = 7
#elif self.element_type == 93:
##etype = 'CHEXANL'
#nnodes_per_element = 9
#else:
#raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
if self.element_type == 85:
msg = [
' N O N L I N E A R S T R E S S E S I N T E T R A H E D R O N S O L I D E L E M E N T S ( T E T R A )'
' \n'
' ELEMENT GRID/ POINT STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID GAUSS ID X Y Z XY YZ ZX STRESS PLAS/NLELAS STRAIN\n'
]
elif self.element_type == 91:
msg = [
' N O N L I N E A R S T R E S S E S I N P E N T A H E D R O N S O L I D E L E M E N T S ( P E N T A )'
' \n'
' ELEMENT GRID/ POINT STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID GAUSS ID X Y Z XY YZ ZX STRESS PLAS/NLELAS STRAIN\n'
]
elif self.element_type == 93:
msg = [
' N O N L I N E A R S T R E S S E S I N H E X A H E D R O N S O L I D E L E M E N T S ( H E X A )\n'
' \n'
' ELEMENT GRID/ POINT STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID GAUSS ID X Y Z XY YZ ZX STRESS PLAS/NLELAS STRAIN\n'
#'0 1 GRID CENTER 1.0000E+04 1.5916E-12 1.3642E-12 -3.5862E-13 8.3400E-14 0.0 1.0000E+04 0.0 0.0'
#' 1.5626E-03 -4.6877E-04 -4.6877E-04 -1.4569E-19 3.3881E-20 0.0'
#' 1 1.0000E+04 -1.8190E-12 4.5475E-13 -6.3308E-13 7.4789E-13 -4.6225E-13 1.0000E+04 0.0 0.0'
#' 1.5626E-03 -4.6877E-04 -4.6877E-04 -2.5719E-19 3.0383E-19 -1.8779E-19'
]
elif self.element_type == 256:
msg = [
' N O N L I N E A R S T R E S S E S I N P Y R A M I D S O L I D E L E M E N T S ( P Y R A M )\n'
' \n'
' ELEMENT GRID/ POINT STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID GAUSS ID X Y Z XY YZ ZX STRESS PLAS/NLELAS STRAIN\n'
]
else: # pragma: no cover
raise NotImplementedError('element_name=%s self.element_type=%s' % (self.element_name, self.element_type))
# write the f06
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
#cen_word = 'CEN/%i' % nnodes
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#oxx, oyy, ozz, txy, tyz, txz, se, eps, ecs,
#exx, eyy, ezz, exy, eyz, exz
oxx = self.data[itime, :, 0]
oyy = self.data[itime, :, 1]
ozz = self.data[itime, :, 2]
txy = self.data[itime, :, 3]
tyz = self.data[itime, :, 4]
txz = self.data[itime, :, 5]
se = self.data[itime, :, 6]
eps = self.data[itime, :, 7]
ecs = self.data[itime, :, 8]
exx = self.data[itime, :, 9]
eyy = self.data[itime, :, 10]
ezz = self.data[itime, :, 11]
exy = self.data[itime, :, 12]
eyz = self.data[itime, :, 13]
exz = self.data[itime, :, 14]
#oxx, oyy, ozz, txy, tyz, txz, se, eps, ecs,
#exx, eyy, ezz, exy, eyz, exz
for (eid, nid, oxxi, oyyi, ozzi, txyi, tyzi, txzi, sei, epsi, ecsi,
exxi, eyyi, ezzi, exyi, eyzi, exzi) in zip(
eids, nids, oxx, oyy, ozz, txy, tyz, txz, se, eps, ecs,
exx, eyy, ezz, exy, eyz, exz):
#' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
#' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
#'0 1 GRID CENTER 1.0000E+04 1.5916E-12 1.3642E-12 -3.5862E-13 8.3400E-14 0.0 1.0000E+04 0.0 0.0'
#' 1.5626E-03 -4.6877E-04 -4.6877E-04 -1.4569E-19 3.3881E-20 0.0'
#' 1 1.0000E+04 -1.8190E-12 4.5475E-13 -6.3308E-13 7.4789E-13 -4.6225E-13 1.0000E+04 0.0 0.0'
#' 1.5626E-03 -4.6877E-04 -4.6877E-04 -2.5719E-19 3.0383E-19 -1.8779E-19'
if nid == 0:
#nid = ' CENTER'
#assert len(nid) == 8
f06_file.write(
'0%8i GRID CENTER %-11s %-11s %-11s %-11s %-11s %-11s %-11s %-11s %s\n'
' %-11s %-11s %-11s %-11s %-11s %-11s\n' % (
# A
#oxxi, oyyi, ozzi, txyi, tyzi, txzi, sei, epsi, ecsi,
#exxi, eyyi, ezzi, exyi, eyzi, exzi
# ELEMENT FIBER XYZ STRESS EQUIVALENT EFF.STRAIN EFF.CREEP\n'
eid,
write_float_11e(oxxi), write_float_11e(oyyi), write_float_11e(ozzi),
write_float_11e(txyi), write_float_11e(tyzi), write_float_11e(txzi),
write_float_11e(sei), write_float_11e(epsi), write_float_11e(ecsi),
write_float_11e(exxi), write_float_11e(eyyi), write_float_11e(ezzi),
write_float_11e(exyi), write_float_11e(eyzi), write_float_11e(exzi),
)
)
else:
f06_file.write(
' %8s %8s %-11s %-11s %-11s %-11s %-11s %-11s %-11s %-11s %s\n'
' %-11s %-11s %-11s %-11s %-11s %s\n' % (
# A
#oxxi, oyyi, ozzi, txyi, tyzi, txzi, sei, epsi, ecsi,
#exxi, eyyi, ezzi, exyi, eyzi, exzi
# ELEMENT FIBER XYZ STRESS EQUIVALENT EFF.STRAIN EFF.CREEP\n'
'', nid,
write_float_11e(oxxi), write_float_11e(oyyi), write_float_11e(ozzi),
write_float_11e(txyi), write_float_11e(tyzi), write_float_11e(txzi),
write_float_11e(sei), write_float_11e(epsi), write_float_11e(ecsi),
write_float_11e(exxi), write_float_11e(eyyi), write_float_11e(ezzi),
write_float_11e(exyi), write_float_11e(eyzi), write_float_11e(exzi),
)
)
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
| 48.525641 | 151 | 0.472761 |
c312905ab3d15b2d7a0c62aa1f4ddd37e4ad2e19 | 533 | py | Python | code/python/third_party/Deadline/JobTaskLimit.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | ["AML"] | 10 | 2020-11-17T00:33:42.000Z | 2022-02-16T23:31:58.000Z | code/python/third_party/Deadline/JobTaskLimit.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | ["AML"] | null | null | null | code/python/third_party/Deadline/JobTaskLimit.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | ["AML"] | null | null | null |
from .ConnectionProperty import ConnectionProperty
class JobTaskLimit:
"""
Class used by DeadlineCon to send Job task limit requests.
Stores the address of the Web Service for use in sending requests.
"""
def __init__(self, connectionProperties):
self.connectionProperties = connectionProperties
def GetJobTaskLimit(self):
""" Gets the Job task limit.
Returns: The Job task limit.
"""
return self.connectionProperties.__get__("/api/jobtasklimit")
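# Usage sketch (hedged: the DeadlineCon attribute name and host/port are assumptions
# for illustration): this class is normally reached through a DeadlineCon wrapper
# rather than constructed directly, e.g.
#
#     con = DeadlineCon('localhost', 8082)
#     limit = con.JobTaskLimit.GetJobTaskLimit()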
| 35.533333 | 74 | 0.673546 |
f4c7bb43f1f26c1127760d1f3a445258169f1be5 | 1,180 | py | Python | dsbot/expertise/ExpertiseAnalyzer.py | jefrysastre/dsbot | 5afbfc34b2846f13d118df70160513935331983d | ["MIT"] | null | null | null | dsbot/expertise/ExpertiseAnalyzer.py | jefrysastre/dsbot | 5afbfc34b2846f13d118df70160513935331983d | ["MIT"] | null | null | null | dsbot/expertise/ExpertiseAnalyzer.py | jefrysastre/dsbot | 5afbfc34b2846f13d118df70160513935331983d | ["MIT"] | null | null | null |
import numpy as np
import os
import pickle
from .datascience_glossary import DataScienceGlossary
class ExpertiseAnalyzer():
def __init__(self):
self.path = 'data/user.settings.pickle'
self.user_config = {
'text_expertise': 0.5,
'commands':{
'speaking': 0.125,
'algorithm': 0.125,
'preprocessing': 0.125,
'visualization': 0.125,
'pipe': 0.125,
'metric': 0.125,
'code': 0.125,
'other': 0.125
}
}
if not os.path.exists(self.path):
with open(self.path, 'wb') as file:
pickle.dump(self.user_config, file)
else:
with open(self.path, 'rb') as file:
self.user_config = pickle.load(file)
self.analyzers = [
DataScienceGlossary()
]
def analyze(self, sentence):
for analyzer in self.analyzers:
analyzer.analyze(sentence, self.user_config)
# update user settings file
with open(self.path, 'wb') as file:
pickle.dump(self.user_config, file)
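# Usage sketch (assumes a writable 'data/' directory for the pickle path; the sentence
# is an arbitrary example):
#
#     analyzer = ExpertiseAnalyzer()
#     analyzer.analyze("how do I tune the hyperparameters of a random forest?")
#     print(analyzer.user_config['text_expertise'])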
| 26.818182 | 56 | 0.518644 |
d7b419008236dc9e39a7a975c5400d8675323c98 | 15,062 | py | Python | fritzbox_exporter.py | jonilala796/fritzbox_exporter | 04ac1709cee9dfa59b15797e6745812c62afc5f5 | ["Apache-2.0"] | null | null | null | fritzbox_exporter.py | jonilala796/fritzbox_exporter | 04ac1709cee9dfa59b15797e6745812c62afc5f5 | ["Apache-2.0"] | null | null | null | fritzbox_exporter.py | jonilala796/fritzbox_exporter | 04ac1709cee9dfa59b15797e6745812c62afc5f5 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 Patrick Dreker <patrick@dreker.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import json
import fritzconnection as fc
import prometheus_client
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
class FritzBoxConnection:
def __init__(self, host, user, passwd):
self.host = host
self.user = user
self.passwd = passwd
self.conn = None
def connect(self):
self.conn = fc.FritzConnection(address=self.host, user=self.user, password=self.passwd)
class FritzBoxCollector(object):
def get_fritzbox_list(self):
boxlist = list()
if os.path.exists(self.config_file):
with open(self.config_file, 'r') as fh:
json_config = json.loads(fh.read())
if json_config is None or type(json_config) is not list:
raise ValueError("Failed to read json data from configuration")
for json_entry in json_config:
boxlist.append(FritzBoxConnection(
json_entry['host'],
json_entry['username'],
json_entry['password'],
))
if os.getenv('FRITZ_USER') is not None and os.getenv('FRITZ_PASS') is not None:
boxlist.append(FritzBoxConnection(
os.getenv('FRITZ_HOST', 'fritz.box'),
os.getenv('FRITZ_USER'),
os.getenv('FRITZ_PASS')
))
for box in boxlist:
box.connect()
return boxlist
def __init__(self, config_file):
self.config_file = config_file
self.boxes = self.get_fritzbox_list()
def collect(self):
if len(self.boxes) == 0:
print("Skipping collect(), no boxes configured!")
return
fritzbox_uptime = CounterMetricFamily('fritzbox_uptime', 'FritzBox uptime, system info in labels',
labels=['ModelName', 'SoftwareVersion', 'Serial'])
fritzbox_update = GaugeMetricFamily('fritzbox_update_available', 'FritzBox update available',
labels=['Serial', 'NewSoftwareVersion'])
fritzbox_lanenable = GaugeMetricFamily('fritzbox_lan_status_enabled', 'LAN Interface enabled',
labels=['Serial'])
fritzbox_lanstatus = GaugeMetricFamily('fritzbox_lan_status', 'LAN Interface status', labels=['Serial'])
fritzbox_lan_brx = CounterMetricFamily('fritzbox_lan_received_bytes', 'LAN bytes received', labels=['Serial'])
fritzbox_lan_btx = CounterMetricFamily('fritzbox_lan_transmitted_bytes', 'LAN bytes transmitted',
labels=['Serial'])
fritzbox_lan_prx = CounterMetricFamily('fritzbox_lan_received_packets_total', 'LAN packets received',
labels=['Serial'])
fritzbox_lan_ptx = CounterMetricFamily('fritzbox_lan_transmitted_packets_total', 'LAN packets transmitted',
labels=['Serial'])
fritzbox_dsl_enable = GaugeMetricFamily('fritzbox_dsl_status_enabled', 'DSL enabled', labels=['Serial'])
fritzbox_dsl_status = GaugeMetricFamily('fritzbox_dsl_status', 'DSL status', labels=['Serial'])
fritzbox_dsl_datarate = GaugeMetricFamily('fritzbox_dsl_datarate_kbps', 'DSL datarate in kbps',
labels=['Serial', 'Direction', 'Type'])
fritzbox_internet_online_monitor = GaugeMetricFamily('fritzbox_internet_online_monitor', 'Online-Monitor stats in bps',
labels=['Serial', 'Direction', 'Type'])
fritzbox_dsl_noisemargin = GaugeMetricFamily('fritzbox_dsl_noise_margin_dB', 'Noise Margin in dB',
labels=['Serial', 'Direction'])
fritzbox_dsl_attenuation = GaugeMetricFamily('fritzbox_dsl_attenuation_dB', 'Line attenuation in dB',
labels=['Serial', 'Direction'])
fritzbox_ppp_uptime = GaugeMetricFamily('fritzbox_ppp_connection_uptime', 'PPP connection uptime',
labels=['Serial'])
fritzbox_ppp_connected = GaugeMetricFamily('fritzbox_ppp_conection_state', 'PPP connection state',
labels=['Serial', 'last_error'])
fritzbox_wan_data = CounterMetricFamily('fritzbox_wan_data_bytes', 'WAN data in bytes',
labels=['Serial', 'Direction'])
fritzbox_wan_packets = CounterMetricFamily('fritzbox_wan_data_packets', 'WAN data in packets',
labels=['Serial', 'Direction'])
fritzbox_fec_errors = GaugeMetricFamily('fritzbox_dsl_errors_fec', 'FEC errors', labels=['Serial'])
fritzbox_crc_errors = GaugeMetricFamily('fritzbox_dsl_errors_crc', 'CRC Errors', labels=['Serial'])
fritzbox_dsl_upstream_power = GaugeMetricFamily('fritzbox_dsl_power_upstream', 'Upstream Power',
labels=['Serial'])
fritzbox_dsl_downstream_power = GaugeMetricFamily('fritzbox_dsl_power_downstream', 'Downstream Power',
labels=['Serial'])
for box in self.boxes:
try:
connection = box.conn
info_result = connection.call_action('DeviceInfo:1', 'GetInfo')
fb_serial = info_result['NewSerialNumber']
# fritzbox_uptime
fritzbox_uptime.add_metric(
[info_result['NewModelName'], info_result['NewSoftwareVersion'], fb_serial],
info_result['NewUpTime']
)
# fritzbox_update_available
update_result = connection.call_action('UserInterface:1', 'GetInfo')
upd_available = 1 if update_result['NewUpgradeAvailable'] == '1' else 0
new_software_version = "n/a" if update_result['NewX_AVM-DE_Version'] is None else update_result[
'NewX_AVM-DE_Version']
fritzbox_update.add_metric([fb_serial, new_software_version], upd_available)
# fritzbox_lan_status_enabled
lanstatus_result = connection.call_action('LANEthernetInterfaceConfig:1', 'GetInfo')
fritzbox_lanenable.add_metric([fb_serial], lanstatus_result['NewEnable'])
# fritzbox_lan_status
lanstatus = 1 if lanstatus_result['NewStatus'] == 'Up' else 0
fritzbox_lanstatus.add_metric([fb_serial], lanstatus)
# fritzbox_lan_received_bytes
# fritzbox_lan_transmitted_bytes
# fritzbox_lan_received_packets_total
# fritzbox_lan_transmitted_packets_total
lanstats_result = connection.call_action('LANEthernetInterfaceConfig:1', 'GetStatistics')
fritzbox_lan_brx.add_metric([fb_serial], lanstats_result['NewBytesReceived'])
fritzbox_lan_btx.add_metric([fb_serial], lanstats_result['NewBytesSent'])
fritzbox_lan_prx.add_metric([fb_serial], lanstats_result['NewPacketsReceived'])
fritzbox_lan_ptx.add_metric([fb_serial], lanstats_result['NewPacketsSent'])
# fritzbox_dsl_status_enabled
# fritzbox_dsl_status
fritzbox_dslinfo_result = connection.call_action('WANDSLInterfaceConfig:1', 'GetInfo')
fritzbox_dsl_enable.add_metric([fb_serial], fritzbox_dslinfo_result['NewEnable'])
dslstatus = 1 if fritzbox_dslinfo_result['NewStatus'] == 'Up' else 0
fritzbox_dsl_status.add_metric([fb_serial], dslstatus)
# fritzbox_dsl_datarate_kbps
fritzbox_dsl_datarate.add_metric([fb_serial, 'up', 'curr'],
fritzbox_dslinfo_result['NewUpstreamCurrRate'])
fritzbox_dsl_datarate.add_metric([fb_serial, 'down', 'curr'],
fritzbox_dslinfo_result['NewDownstreamCurrRate'])
fritzbox_dsl_datarate.add_metric([fb_serial, 'up', 'max'],
fritzbox_dslinfo_result['NewUpstreamMaxRate'])
fritzbox_dsl_datarate.add_metric([fb_serial, 'down', 'max'],
fritzbox_dslinfo_result['NewDownstreamMaxRate'])
# fritzbox_internet_online_monitor
online_monitor = connection.call_action('WANCommonInterfaceConfig', 'X_AVM-DE_GetOnlineMonitor',
arguments={"NewSyncGroupIndex": 0})
fritzbox_internet_online_monitor.add_metric([fb_serial, 'up', 'max'], online_monitor['Newmax_us'])
fritzbox_internet_online_monitor.add_metric([fb_serial, 'down', 'max'], online_monitor['Newmax_ds'])
fritzbox_internet_online_monitor.add_metric([fb_serial, 'up', 'curr'], online_monitor['Newus_current_bps'].split(',')[0])
fritzbox_internet_online_monitor.add_metric([fb_serial, 'down', 'curr'], online_monitor['Newds_current_bps'].split(',')[0])
# fritzbox_dsl_noise_margin_dB
fritzbox_dsl_noisemargin.add_metric([fb_serial, 'up'],
fritzbox_dslinfo_result['NewUpstreamNoiseMargin'] / 10)
fritzbox_dsl_noisemargin.add_metric([fb_serial, 'down'],
fritzbox_dslinfo_result['NewDownstreamNoiseMargin'] / 10)
# fritzbox_dsl_attenuation_dB
fritzbox_dsl_attenuation.add_metric([fb_serial, 'up'],
fritzbox_dslinfo_result['NewUpstreamAttenuation'] / 10)
fritzbox_dsl_attenuation.add_metric([fb_serial, 'down'],
fritzbox_dslinfo_result['NewDownstreamAttenuation'] / 10)
# fritzbox_ppp_connection_uptime
# fritzbox_ppp_conection_state
fritzbox_pppstatus_result = connection.call_action('WANPPPConnection:1', 'GetStatusInfo')
pppconnected = 1 if fritzbox_pppstatus_result['NewConnectionStatus'] == 'Connected' else 0
fritzbox_ppp_uptime.add_metric([fb_serial], fritzbox_pppstatus_result['NewUptime'])
fritzbox_ppp_connected.add_metric([fb_serial, fritzbox_pppstatus_result['NewLastConnectionError']],
pppconnected)
# fritzbox_wan_data_bytes
fritzbox_wan_result = connection.call_action('WANCommonIFC1', 'GetAddonInfos')
wan_bytes_rx = fritzbox_wan_result['NewX_AVM_DE_TotalBytesReceived64']
wan_bytes_tx = fritzbox_wan_result['NewX_AVM_DE_TotalBytesSent64']
fritzbox_wan_data.add_metric([fb_serial, 'up'], wan_bytes_tx)
fritzbox_wan_data.add_metric([fb_serial, 'down'], wan_bytes_rx)
# fritzbox_wan_data_packets
fritzbox_wan_result = connection.call_action('WANCommonInterfaceConfig:1', 'GetTotalPacketsReceived')
wan_packets_rx = fritzbox_wan_result['NewTotalPacketsReceived']
fritzbox_wan_result = connection.call_action('WANCommonInterfaceConfig:1', 'GetTotalPacketsSent')
wan_packets_tx = fritzbox_wan_result['NewTotalPacketsSent']
fritzbox_wan_packets.add_metric([fb_serial, 'up'], wan_packets_tx)
fritzbox_wan_packets.add_metric([fb_serial, 'down'], wan_packets_rx)
# fritzbox_dsl_errors_*
statistics_total = connection.call_action('WANDSLInterfaceConfig1', 'X_AVM-DE_GetDSLInfo')
fritzbox_crc_errors.add_metric([fb_serial], statistics_total['NewCRCErrors'])
fritzbox_fec_errors.add_metric([fb_serial], statistics_total['NewFECErrors'])
# fritzbox_dsl_power_*
fritzbox_dsl_upstream_power.add_metric([fb_serial], statistics_total['NewUpstreamPower'])
fritzbox_dsl_downstream_power.add_metric([fb_serial], statistics_total['NewDownstreamPower'])
except Exception as e:
                print("Error fetching metrics for FB " + box.host + ": " + str(e))
yield fritzbox_uptime
yield fritzbox_update
yield fritzbox_lanenable
yield fritzbox_lanstatus
yield fritzbox_lan_brx
yield fritzbox_lan_btx
yield fritzbox_lan_prx
yield fritzbox_lan_ptx
yield fritzbox_dsl_enable
yield fritzbox_dsl_status
yield fritzbox_dsl_datarate
yield fritzbox_internet_online_monitor
yield fritzbox_dsl_noisemargin
yield fritzbox_dsl_attenuation
yield fritzbox_ppp_uptime
yield fritzbox_ppp_connected
yield fritzbox_wan_data
yield fritzbox_wan_packets
yield fritzbox_fec_errors
yield fritzbox_crc_errors
yield fritzbox_dsl_upstream_power
yield fritzbox_dsl_downstream_power
def get_configuration():
collectors = list()
if os.path.exists('settings.json'):
with open('settings.json', 'r') as fh:
configuration = json.loads(fh.read())
if configuration is not None:
if type(configuration) is list:
for entry in configuration:
if 'host' in entry and 'username' in entry and 'password' in entry:
collectors.append(
FritzBoxCollector(entry['host'], entry['username'], entry['password']))
if os.getenv('FRITZ_USER') is not None and os.getenv('FRITZ_PASS') is not None:
collectors.append(
FritzBoxCollector(os.getenv('FRITZ_HOST', 'fritz.box'), os.getenv('FRITZ_USER'), os.getenv('FRITZ_PASS')))
return collectors
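# The settings.json file read above is assumed to be a list of objects using the same keys
# that get_configuration() looks up; an illustrative example (placeholder values only):
#
#     [
#         {"host": "fritz.box", "username": "monitoring", "password": "secret"}
#     ]
#
# The FRITZ_HOST, FRITZ_USER and FRITZ_PASS environment variables add one more collector.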
if __name__ == '__main__':
REGISTRY.register(FritzBoxCollector('settings.json'))
# Start up the server to expose the metrics.
print("Starting Server at " + str(os.getenv('FRITZ_EXPORTER_PORT', 8765)))
prometheus_client.start_http_server(os.getenv('FRITZ_EXPORTER_PORT', 8765))
while True:
time.sleep(10000)
| 53.985663
| 139
| 0.621033
|
6b955bea5aeca0b8dc9d7af6c871515b334fc6f2
| 499
|
py
|
Python
|
setup.py
|
NativeDesign/python-firmware
|
f5ecbbef62f57e27f883fab82a680871a00b1b88
|
[
"MIT"
] | null | null | null |
setup.py
|
NativeDesign/python-firmware
|
f5ecbbef62f57e27f883fab82a680871a00b1b88
|
[
"MIT"
] | null | null | null |
setup.py
|
NativeDesign/python-firmware
|
f5ecbbef62f57e27f883fab82a680871a00b1b88
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='ntv-firmware',
version='1.0.2',
description='Utility function for locating the serial port of a particular firmware device.',
url='https://git.native.com/ntv/python-firmware',
author='Alan Pich',
author_email='alanp@native.com',
license='MIT',
packages=['ntv_firmware'],
install_requires=[
],
entry_points={
# 'console_scripts': ['eopdev=eopdev.cli:eopdev']
},
zip_safe=False)
| 29.352941
| 99
| 0.633267
|
956a29d160d9db68a65b6a65a0b84591c57d4e88
| 3,115
|
py
|
Python
|
src/stay_classification/metrics_cluster_tools.py
|
m-salewski/stay_classification
|
e3f9deadf51c97029a0f9a4bb669a5af68abf7c6
|
[
"MIT"
] | null | null | null |
src/stay_classification/metrics_cluster_tools.py
|
m-salewski/stay_classification
|
e3f9deadf51c97029a0f9a4bb669a5af68abf7c6
|
[
"MIT"
] | null | null | null |
src/stay_classification/metrics_cluster_tools.py
|
m-salewski/stay_classification
|
e3f9deadf51c97029a0f9a4bb669a5af68abf7c6
|
[
"MIT"
] | null | null | null |
import numpy as np
from synthetic_data.trajectory import get_stay_indices, get_adjusted_stays
from sklearn.metrics import precision_score, recall_score, accuracy_score, confusion_matrix
from stay_classification.cluster_helper import inter_bounds
#TODO
# [x] move all generic functions related to measuring/manipulating clusters to cluster_helper
# [ ] update all dependencies of this submodule
def get_accumulated_result(fnt, iterable):
"""
Iterate through clusters and accumulate the total length
"""
accumulated_result = 0
for iter_elem in iterable:
accumulated_result += fnt(iter_elem)
return accumulated_result
# TODO gather these into cluster_helper
get_time_duration = lambda t_arr: lambda ind1, ind2: abs(t_arr[ind2]-t_arr[ind1])
get_clust_duration = lambda t_arr: lambda clust: abs(t_arr[clust[-1]]-t_arr[clust[0]])
get_clust_length = lambda clust: len(clust)
subcluster_lengths = lambda cluster_list: [len(c) for c in cluster_list]
# def get_clusters_duration(get_clust_duration(t_arr),clusts)
def get_clusters_duration(t_arr, clusts):
"""
Iterate through clusters and accumulate the total duration
"""
accumulated_result = 0
for clust in clusts:
accumulated_result += get_clust_duration(t_arr)(clust)
return accumulated_result
# def get_clusters_duration(get_clust_length,clusts)
def get_clusters_length(t_arr, clusts):
"""
Iterate through clusters and accumulate the total length
"""
accumulated_result = 0
for clust in clusts:
accumulated_result += get_clust_length(clust)
return accumulated_result
def get_subcluster_labels(t_arr):
"""
Get the stay (1) and travel (0) labels for a single cluster
"""
def meth(clust):
labels = np.zeros(t_arr.shape)
labels[clust[0]:clust[-1]+1] = 1
return labels
return meth
#TODO: this is a duplicate method; use a closure: get_all_labels(shape)(clusters)
def get_labels_from_clusters(clusters, shape):
"""
Get the stay (1) and travel (0) labels from a set of clusters
"""
# Loop through the clusters to get the end points;
# create array of one & zeros (stays & travels)
labels = np.zeros(shape)
for clust in clusters:
labels[clust[0]:clust[-1]+1] = 1
return labels
#TODO: this is a duplicate method
def get_pred_labels(clusters, shape):
"""
Get the stay (1) and travel (0) labels from a set of clusters
"""
# Loop through the clusters to get the end points;
# create array of one & zeros (stays & travels)
pred_labels = np.zeros(shape)
for clust in clusters:
pred_labels[clust[0]:clust[-1]+1] = 1
return pred_labels
def get_true_clusters(t_arr, segments):
"""
Get the predicted and true number of clusters
"""
true_clusters = []
for n in range(0,len(segments),2):
indices = get_stay_indices(get_adjusted_stays(segments[n:n+1], t_arr), t_arr)[0]
true_clusters.append(list(range(indices[0],indices[-1]+1)))
return true_clusters
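# Minimal usage sketch (illustrative values only; these two calls do not depend on the
# synthetic_data or cluster_helper imports above):
#
#     t_arr = np.linspace(0, 10, 11)
#     clusters = [[0, 1, 2], [6, 7, 8, 9]]
#     get_clusters_duration(t_arr, clusters)            # 2.0 + 3.0 -> 5.0
#     get_labels_from_clusters(clusters, t_arr.shape)   # 0/1 stay labels over all samples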
| 28.577982
| 93
| 0.697271
|
ef238d6e39f457f53d21e49d49e2904b12483774
| 2,870
|
py
|
Python
|
api_watchdog/formatters/result_group_html.py
|
davefol/api-watchdog
|
c1ba8022e4f8fcaea020d3bd55fa7f88fb7418f7
|
[
"MIT"
] | null | null | null |
api_watchdog/formatters/result_group_html.py
|
davefol/api-watchdog
|
c1ba8022e4f8fcaea020d3bd55fa7f88fb7418f7
|
[
"MIT"
] | 7
|
2021-12-03T15:40:10.000Z
|
2022-03-31T13:13:24.000Z
|
api_watchdog/formatters/result_group_html.py
|
davefol/api-watchdog
|
c1ba8022e4f8fcaea020d3bd55fa7f88fb7418f7
|
[
"MIT"
] | 1
|
2021-12-03T21:50:23.000Z
|
2021-12-03T21:50:23.000Z
|
from api_watchdog.collect import WatchdogResultGroup
from api_watchdog.core import WatchdogResult, ExpectationResult
def html_from_result_group(result_group: WatchdogResultGroup) -> str:
def group_format(result_group: WatchdogResultGroup):
html = (
f'<h2>{result_group.name}</h2>'
)
for result in sorted(result_group.results, key=lambda g: g.test_name):
html += result_format(result)
for child_result_group in sorted(result_group.groups, key=lambda g: g.name):
html += group_format(child_result_group)
return html
def result_format(result: WatchdogResult) -> str:
passed = "Pass" if result.success else "Fail"
html = (
f'<h3>{result.test_name}: {passed} ({result.latency:.3f}s)</h3>\n'
f'<div class="expectations">\n'
)
for expectation_result in result.results:
html += expectation_result_format(expectation_result)
html += (
f'</div>\n'
)
return html
def expectation_result_format(expectation_result: ExpectationResult) -> str:
success_class_name = "passed" if expectation_result.result == "success" else "failed"
level_class_name = expectation_result.expectation.level.value
class_name = success_class_name + "-" + level_class_name # outlook and some other renderers do not support AND style selectors
html = (
f'<div class="result {class_name}">\n'
f' <p>{expectation_result.expectation.selector}</p>\n'
f' <p>({expectation_result.expectation.validation_type.value}){expectation_result.expectation.value}</p>\n'
f' <p>{expectation_result.actual} ({level_class_name.upper()})</p>\n'
f'</div>'
)
return html
html = """
<html>
<head>
<title>
Watchdog Test Report
</title>
<style type="text/css">
body{
margin:40px auto;
max-width:650px;
line-height:1.6;
font-size:18px;
color:#444;
padding:0 10px;
}
h1, h2, h3 {
line-height:1.2;
}
code {
border: 1px solid #ddd;
background-color: #f8f8f8;
font-family:'Lucida Console', monospace;
}
.result {
font-size: 12px;
padding-left: 16px;
border-radius: 8px;
border: 1px solid #ddd;
font-family:'Lucida Console', monospace;
}
.passed-critical, .passed-warning {
background-color: #dbfad9;
}
.failed-critical {
background-color: #fadad9;
}
.failed-warning {
background-color: #fff9db;
}
.passed-info, .failed-info {
background-color: #d9d9d9;
}
</style>
</head>
<body>
<h1>Watchdog Results Report</h1>
"""
html += group_format(result_group)
html += """
</body>
</html>
"""
return html
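# Usage sketch (assumes `group` is a WatchdogResultGroup produced by the collection step;
# its construction is not shown here):
#
#     report = html_from_result_group(group)
#     with open("watchdog_report.html", "w") as fh:
#         fh.write(report)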
| 28.137255
| 134
| 0.605923
|
d9167a48bf830a6d9b18dbdbd253d06539fec6c8
| 643
|
py
|
Python
|
lecture3/tests/question-3_7.py
|
ggorman/Introduction-Python-programming-2018
|
739b864c1499ccdbf9010d8fe774087a07bb09ee
|
[
"CC-BY-3.0"
] | 1
|
2019-01-12T12:43:24.000Z
|
2019-01-12T12:43:24.000Z
|
lecture3/tests/question-3_7.py
|
ggorman/Introduction-Python-programming-2018
|
739b864c1499ccdbf9010d8fe774087a07bb09ee
|
[
"CC-BY-3.0"
] | null | null | null |
lecture3/tests/question-3_7.py
|
ggorman/Introduction-Python-programming-2018
|
739b864c1499ccdbf9010d8fe774087a07bb09ee
|
[
"CC-BY-3.0"
] | 3
|
2019-05-16T21:08:48.000Z
|
2022-02-21T06:54:57.000Z
|
test = {
'name': 'question 3.7',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> np.allclose(myprimes, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> type(myprimes) == list
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': """
import numpy as np
""",
'teardown': '',
'type': 'doctest'
}
]
}
| 19.484848
| 133
| 0.353033
|
5c09f94273e2c8907ac7a0ad316529ad22a61b0e
| 432
|
py
|
Python
|
running_modes/reinforcement_learning/configurations/link_invent_scoring_strategy_congfiguration.py
|
lilleswing/Reinvent-1
|
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
|
[
"Apache-2.0"
] | 183
|
2020-04-04T02:01:15.000Z
|
2022-03-30T21:56:56.000Z
|
running_modes/reinforcement_learning/configurations/link_invent_scoring_strategy_congfiguration.py
|
lilleswing/Reinvent-1
|
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
|
[
"Apache-2.0"
] | 39
|
2020-04-05T15:19:56.000Z
|
2022-03-09T12:58:21.000Z
|
running_modes/reinforcement_learning/configurations/link_invent_scoring_strategy_congfiguration.py
|
lilleswing/Reinvent-1
|
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
|
[
"Apache-2.0"
] | 70
|
2020-04-05T19:25:43.000Z
|
2022-02-22T12:04:39.000Z
|
from dataclasses import dataclass
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import \
DiversityFilterParameters
from running_modes.reinforcement_learning.configurations.scoring_strategy_configuration import \
ScoringStrategyConfiguration
@dataclass
class LinkInventScoringStrategyConfiguration(ScoringStrategyConfiguration):
diversity_filter: DiversityFilterParameters
| 33.230769
| 98
| 0.886574
|
61cc72611a4596cd580f9583dcb420a073aec733
| 8,575
|
py
|
Python
|
indico/modules/events/editing/blueprint.py
|
ParthS007/indico
|
5031dd03e16c8021bb41f419ec9f2b6cdfe2148d
|
[
"MIT"
] | 1
|
2021-02-24T10:20:14.000Z
|
2021-02-24T10:20:14.000Z
|
indico/modules/events/editing/blueprint.py
|
ParthS007/indico
|
5031dd03e16c8021bb41f419ec9f2b6cdfe2148d
|
[
"MIT"
] | null | null | null |
indico/modules/events/editing/blueprint.py
|
ParthS007/indico
|
5031dd03e16c8021bb41f419ec9f2b6cdfe2148d
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.editing.controllers import frontend
from indico.modules.events.editing.controllers.backend import common, editable_list, management, service, timeline
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('event_editing', __name__, url_prefix='/event/<int:event_id>', template_folder='templates',
virtual_template_folder='events/editing')
# Frontend (management)
_bp.add_url_rule('/manage/editing/', 'dashboard', frontend.RHEditingDashboard)
_bp.add_url_rule('/manage/editing/tags', 'manage_tags', frontend.RHEditingDashboard)
_bp.add_url_rule('/manage/editing/<any(paper,slides,poster):type>/', 'manage_editable_type',
frontend.RHEditingDashboard)
_bp.add_url_rule('/manage/editing/<any(paper,slides,poster):type>/list', 'manage_editable_type_list',
frontend.RHEditingDashboard)
_bp.add_url_rule('/manage/editing/<any(paper,slides,poster):type>/types', 'manage_file_types',
frontend.RHEditingDashboard)
_bp.add_url_rule('/manage/editing/<any(paper,slides,poster):type>/review-conditions', 'manage_review_conditions',
frontend.RHEditingDashboard)
# Frontend (timeline)
contrib_prefix = '/contributions/<int:contrib_id>/editing/<any(paper,slides,poster):type>'
_bp.add_url_rule(contrib_prefix, 'editable', frontend.RHEditableTimeline)
_bp.add_url_rule('/editing/<any(paper,slides,poster):type>/list', 'editable_type_list', frontend.RHEditableTypeList)
_bp.add_url_rule(contrib_prefix + '/<int:revision_id>/files.zip', 'revision_files_export',
timeline.RHExportRevisionFiles)
_bp.add_url_rule(contrib_prefix + '/<int:revision_id>/<int:file_id>/<filename>', 'download_file',
timeline.RHDownloadRevisionFile)
# Non-API backend
_bp.add_url_rule('/manage/editing/<any(paper,slides,poster):type>/contact-team', 'contact_team',
management.RHContactEditingTeam)
# Event-level APIs
review_cond_prefix = '/editing/api/<any(paper,slides,poster):type>/review-conditions'
_bp.add_url_rule(review_cond_prefix, 'api_review_conditions',
management.RHEditingReviewConditions, methods=('GET', 'POST'))
_bp.add_url_rule(review_cond_prefix + '/<int:condition_id>', 'api_edit_review_condition',
management.RHEditingEditReviewCondition, methods=('DELETE', 'PATCH'))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/file-types', 'api_file_types',
common.RHEditingFileTypes)
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/file-types', 'api_add_file_type',
management.RHCreateFileType, methods=('POST',))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/list', 'api_editable_list',
editable_list.RHEditableList)
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/file-types/<int:file_type_id>', 'api_edit_file_type',
management.RHEditFileType, methods=('PATCH', 'DELETE'))
_bp.add_url_rule('/editing/api/tags', 'api_tags', common.RHEditingTags)
_bp.add_url_rule('/editing/api/tags', 'api_create_tag', management.RHCreateTag, methods=('POST',))
_bp.add_url_rule('/editing/api/tag/<int:tag_id>', 'api_edit_tag', management.RHEditTag, methods=('PATCH', 'DELETE'))
_bp.add_url_rule('/editing/api/menu-entries', 'api_menu_entries', common.RHMenuEntries)
_bp.add_url_rule('/editing/api/enabled-editable-types', 'api_enabled_editable_types',
management.RHEnabledEditableTypes, methods=('GET', 'POST'))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editable-assignment/self-assign-enabled',
'api_self_assign_enabled', management.RHEditableSetSelfAssign, methods=('GET', 'PUT', 'DELETE'))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/principals', 'api_editable_type_principals',
management.RHEditableTypePrincipals, methods=('GET', 'POST'))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/submission-enabled',
'api_submission_enabled', management.RHEditableSetSubmission, methods=('GET', 'PUT', 'DELETE'))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editing-enabled',
'api_editing_enabled', management.RHEditableSetEditing, methods=('GET', 'PUT', 'DELETE'))
_bp.add_url_rule('/editing/api/service/check-url', 'api_check_service_url', service.RHCheckServiceURL)
_bp.add_url_rule('/editing/api/service/connect', 'api_service_connect', service.RHConnectService, methods=('POST',))
_bp.add_url_rule('/editing/api/service/disconnect', 'api_service_disconnect', service.RHDisconnectService,
methods=('POST',))
_bp.add_url_rule('/editing/api/service/status', 'api_service_status', service.RHServiceStatus)
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editables/prepare-archive',
'api_prepare_editables_archive', editable_list.RHPrepareEditablesArchive, methods=('POST',))
_bp.add_url_rule('/editing/<any(paper,slides,poster):type>/editables/archive/<uuid:uuid>.zip', 'download_archive',
editable_list.RHDownloadArchive)
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editables/assign', 'api_assign_editor',
editable_list.RHAssignEditor, methods=('POST',))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editables/assign/me', 'api_assign_myself',
editable_list.RHAssignMyselfAsEditor, methods=('POST',))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editables/unassign', 'api_unassign_editor',
editable_list.RHUnassignEditor, methods=('POST',))
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editables/self-assign-allowed',
'api_editor_self_assign_allowed', common.RHEditableCheckSelfAssign)
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/editors', 'api_editable_type_editors',
management.RHEditableTypeEditors)
_bp.add_url_rule('/editing/api/<any(paper,slides,poster):type>/list-with-filetypes',
'api_filter_editables_by_filetypes', editable_list.RHFilterEditablesByFileTypes, methods=('POST',))
# Editable-level APIs
contrib_api_prefix = '/api' + contrib_prefix
_bp.add_url_rule(contrib_api_prefix + '/editor/', 'api_unassign_editable', timeline.RHEditableUnassign,
methods=('DELETE',))
_bp.add_url_rule(contrib_api_prefix + '/editor/me', 'api_assign_editable_self', timeline.RHEditableAssignMe,
methods=('PUT',))
# Contribution/revision-level APIs
_bp.add_url_rule(contrib_api_prefix, 'api_editable', timeline.RHEditable)
_bp.add_url_rule(contrib_api_prefix, 'api_create_editable', timeline.RHCreateEditable, methods=('PUT',))
_bp.add_url_rule(contrib_api_prefix + '/upload', 'api_upload', timeline.RHEditingUploadFile, methods=('POST',))
_bp.add_url_rule(contrib_api_prefix + '/add-paper-file', 'api_add_paper_file',
timeline.RHEditingUploadPaperLastRevision, methods=('POST',))
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/review', 'api_review_editable',
timeline.RHReviewEditable, methods=('POST',))
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/confirm', 'api_confirm_changes',
timeline.RHConfirmEditableChanges, methods=('POST',),)
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/replace', 'api_replace_revision',
timeline.RHReplaceRevision, methods=('POST',))
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/new', 'api_create_submitter_revision',
timeline.RHCreateSubmitterRevision, methods=('POST',),)
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/review', 'api_undo_review',
timeline.RHUndoReview, methods=('DELETE',))
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/comments/', 'api_create_comment',
timeline.RHCreateRevisionComment, methods=('POST',),)
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/comments/<int:comment_id>',
'api_edit_comment', timeline.RHEditRevisionComment, methods=('PATCH', 'DELETE'),)
_bp.add_url_rule(contrib_api_prefix + '/<int:revision_id>/custom-action', 'api_custom_action',
timeline.RHTriggerExtraRevisionAction, methods=('POST',),)
| 71.458333
| 116
| 0.734344
|
c11fb3d7e6eafcc951eb7b4c042412070a202699
| 15,914
|
py
|
Python
|
python/ccxt/async_support/__init__.py
|
jofre-lab/ccxt
|
7c4082897bdf91b556fbf0a0e4f0ac9ad5eef365
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/__init__.py
|
jofre-lab/ccxt
|
7c4082897bdf91b556fbf0a0e4f0ac9ad5eef365
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/__init__.py
|
jofre-lab/ccxt
|
7c4082897bdf91b556fbf0a0e4f0ac9ad5eef365
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.40.14'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.async_support.acx import acx # noqa: F401
from ccxt.async_support.aofex import aofex # noqa: F401
from ccxt.async_support.bcex import bcex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitcoincom import bitcoincom # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bitget import bitget # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitkk import bitkk # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmax import bitmax # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitpanda import bitpanda # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitvavo import bitvavo # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.bleutrade import bleutrade # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bybit import bybit # noqa: F401
from ccxt.async_support.bytetrade import bytetrade # noqa: F401
from ccxt.async_support.cdax import cdax # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.chilebit import chilebit # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coingi import coingi # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.currencycom import currencycom # noqa: F401
from ccxt.async_support.delta import delta # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.dsx import dsx # noqa: F401
from ccxt.async_support.eterbase import eterbase # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.fcoin import fcoin # noqa: F401
from ccxt.async_support.fcoinjp import fcoinjp # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.foxbit import foxbit # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.gopax import gopax # noqa: F401
from ccxt.async_support.hbtc import hbtc # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hollaex import hollaex # noqa: F401
from ccxt.async_support.huobijp import huobijp # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.ice3x import ice3x # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.lakebtc import lakebtc # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.novadax import novadax # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoin import okcoin # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.phemex import phemex # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.probit import probit # noqa: F401
from ccxt.async_support.qtrade import qtrade # noqa: F401
from ccxt.async_support.rightbtc import rightbtc # noqa: F401
from ccxt.async_support.ripio import ripio # noqa: F401
from ccxt.async_support.southxchange import southxchange # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.surbitcoin import surbitcoin # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vaultoro import vaultoro # noqa: F401
from ccxt.async_support.vbtc import vbtc # noqa: F401
from ccxt.async_support.vcc import vcc # noqa: F401
from ccxt.async_support.wavesexchange import wavesexchange # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xbtce import xbtce # noqa: F401
from ccxt.async_support.xena import xena # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
exchanges = [
'acx',
'aofex',
'bcex',
'bequant',
'bibox',
'bigone',
'binance',
'binanceus',
'bit2c',
'bitbank',
'bitbay',
'bitcoincom',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bitget',
'bithumb',
'bitkk',
'bitmart',
'bitmax',
'bitmex',
'bitpanda',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitvavo',
'bitz',
'bl3p',
'bleutrade',
'braziliex',
'btcalpha',
'btcbox',
'btcmarkets',
'btctradeua',
'btcturk',
'buda',
'bw',
'bybit',
'bytetrade',
'cdax',
'cex',
'chilebit',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinfalcon',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinone',
'coinspot',
'crex24',
'currencycom',
'delta',
'deribit',
'digifinex',
'dsx',
'eterbase',
'exmo',
'exx',
'fcoin',
'fcoinjp',
'flowbtc',
'foxbit',
'ftx',
'gateio',
'gemini',
'gopax',
'hbtc',
'hitbtc',
'hollaex',
'huobijp',
'huobipro',
'ice3x',
'idex',
'independentreserve',
'indodax',
'itbit',
'kraken',
'kucoin',
'kuna',
'lakebtc',
'latoken',
'lbank',
'liquid',
'luno',
'lykke',
'mercado',
'mixcoins',
'novadax',
'oceanex',
'okcoin',
'okex',
'paymium',
'phemex',
'poloniex',
'probit',
'qtrade',
'rightbtc',
'ripio',
'southxchange',
'stex',
'surbitcoin',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vaultoro',
'vbtc',
'vcc',
'wavesexchange',
'whitebit',
'xbtce',
'xena',
'yobit',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
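# Usage sketch (any id from `exchanges` works the same way; 'binance' and the 'BTC/USDT'
# symbol are only illustrative choices):
#
#     import asyncio
#     import ccxt.async_support as ccxt
#
#     async def main():
#         exchange = ccxt.binance()
#         try:
#             ticker = await exchange.fetch_ticker('BTC/USDT')
#             print(ticker['last'])
#         finally:
#             await exchange.close()
#
#     asyncio.run(main())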
| 50.84345
| 86
| 0.556554
|
7705002706da21b4282c435cf38c7d3acbde8eb4
| 13,995
|
py
|
Python
|
frappe/model/naming.py
|
ssuda777/frappe
|
d3f3df2ce15154aecc1d9d6d07d947e72c2e8c6e
|
[
"MIT"
] | 1
|
2021-06-11T10:28:07.000Z
|
2021-06-11T10:28:07.000Z
|
frappe/model/naming.py
|
JMBodz/frappe
|
eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d
|
[
"MIT"
] | 3
|
2021-08-23T15:20:28.000Z
|
2022-03-27T07:47:36.000Z
|
frappe/model/naming.py
|
JMBodz/frappe
|
eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d
|
[
"MIT"
] | 1
|
2021-04-10T12:01:10.000Z
|
2021-04-10T12:01:10.000Z
|
"""utilities to generate a document name based on various rules defined.
NOTE:
Till version 13, whenever a submittable document is amended its name is set to orig_name-X,
where X is a counter that increments each time the document is amended again.
From version 14, the naming pattern is changed so that amended documents keep the original
name `orig_name` instead of `orig_name-X`. To make this happen, the cancelled document
naming pattern is changed to 'orig_name-CANC-X'.
"""
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
from frappe import _
from frappe.utils import now_datetime, cint, cstr
import re
from frappe.model import log_types
def set_new_name(doc):
"""
Sets the `name` property for the document based on various rules.
1. If amended doc, set suffix.
2. If `autoname` method is declared, then call it.
3. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
4. If no rule defined, use hash.
:param doc: Document to be named.
"""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname or ""
if autoname.lower() != "prompt" and not frappe.flags.in_import:
doc.name = None
if getattr(doc, "amended_from", None):
doc.name = _get_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif getattr(doc.meta, "istable", False):
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
set_naming_from_document_naming_rule(doc)
if not doc.name:
doc.run_method("autoname")
if not doc.name and autoname:
set_name_from_naming_options(autoname, doc)
# if the autoname option is 'field:' and no name was derived, we need to
# notify
if not doc.name and autoname.startswith("field:"):
fieldname = autoname[6:]
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
# at this point, we fall back to name generation with the hash option
if not doc.name and autoname == "hash":
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
doc.name = make_autoname("hash", doc.doctype)
doc.name = validate_name(
doc.doctype,
doc.name,
frappe.get_meta(doc.doctype).get_field("name_case")
)
def set_name_from_naming_options(autoname, doc):
"""
Get a name based on the autoname field option
"""
_autoname = autoname.lower()
if _autoname.startswith("field:"):
doc.name = _field_autoname(autoname, doc)
elif _autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif _autoname.startswith("prompt"):
_prompt_autoname(autoname, doc)
elif _autoname.startswith("format:"):
doc.name = _format_autoname(autoname, doc)
elif "#" in autoname:
doc.name = make_autoname(autoname, doc=doc)
def set_naming_from_document_naming_rule(doc):
'''
Evaluate rules based on "Document Naming Series" doctype
'''
if doc.doctype in log_types:
return
# ignore_ddl if naming is not yet bootstrapped
for d in frappe.get_all('Document Naming Rule',
dict(document_type=doc.doctype, disabled=0), order_by='priority desc', ignore_ddl=True):
frappe.get_cached_doc('Document Naming Rule', d.name).apply(doc)
if doc.name:
break
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+".#####", "", doc)
def make_autoname(key="", doctype="", doc=""):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key == "hash":
return frappe.generate_hash(doctype, 10)
if "#" not in key:
key = key + ".#####"
elif "." not in key:
error_message = _("Invalid naming series (. missing)")
if doctype:
error_message = _("Invalid naming series (. missing) for {0}").format(doctype)
frappe.throw(error_message)
parts = key.split('.')
n = parse_naming_series(parts, doctype, doc)
return n
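# Illustrative results (the numeric part comes from the `tabSeries` counter, so the exact
# value depends on previously created documents):
#
#     make_autoname("ABC.#####")         # -> "ABC00001", "ABC00002", ...
#     make_autoname("SINV-.YY.-.####")   # -> "SINV-21-0001" during 2021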
def parse_naming_series(parts, doctype='', doc=''):
n = ''
if isinstance(parts, str):
parts = parts.split('.')
series_set = False
today = now_datetime()
for e in parts:
part = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
part = getseries(n, digits)
series_set = True
elif e == 'YY':
part = today.strftime('%y')
elif e == 'MM':
part = today.strftime('%m')
elif e == 'DD':
part = today.strftime("%d")
elif e == 'YYYY':
part = today.strftime('%Y')
elif e == 'timestamp':
part = str(today)
elif e == 'FY':
part = frappe.defaults.get_user_default("fiscal_year")
elif e.startswith('{') and doc:
e = e.replace('{', '').replace('}', '')
part = doc.get(e)
elif doc and doc.get(e):
part = doc.get(e)
else:
part = e
if isinstance(part, str):
n += part
return n
def getseries(key, digits):
# series created ?
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` + 1 WHERE `name`=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("INSERT INTO `tabSeries` (`name`, `current`) VALUES (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name, doc=None):
"""
Reverts the series for particular naming series:
* key is naming series - SINV-.YYYY-.####
* name is actual name - SINV-2021-0001
	1. This function splits the key into two parts: prefix (SINV-YYYY) & hashes (####).
2. Use prefix to get the current index of that naming series from Series table
3. Then revert the current index.
*For custom naming series:*
	1. hash can exist anywhere; if it exists in hashes then it takes the normal flow.
	2. If hash doesn't exist in hashes, we get the hash from prefix, then update name and prefix accordingly.
*Example:*
1. key = SINV-.YYYY.-
* If key doesn't have hash it will add hash at the end
* prefix will be SINV-YYYY based on this will get current index from Series table.
2. key = SINV-.####.-2021
* now prefix = SINV-#### and hashes = 2021 (hash doesn't exist)
* will search hash in key then accordingly get prefix = SINV-
3. key = ####.-2021
* prefix = #### and hashes = 2021 (hash doesn't exist)
* will search hash in key then accordingly get prefix = ""
"""
if hasattr(doc, 'amended_from'):
# Do not revert the series if the document is amended.
if doc.amended_from:
return
	# Get document name by parsing in case of the first cancelled document
if doc.docstatus == 2 and not doc.amended_from:
if doc.name.endswith('-CANC'):
name, _ = NameParser.parse_docname(doc.name, sep='-CANC')
else:
name, _ = NameParser.parse_docname(doc.name, sep='-CANC-')
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
# get the hash part from the key
hash = re.search("#+", key)
if not hash:
return
name = name.replace(hashes, "")
prefix = prefix.replace(hash.group(), "")
else:
prefix = key
if '.' in prefix:
prefix = parse_naming_series(prefix.split('.'), doc=doc)
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` - 1 WHERE `name`=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name:
frappe.throw(_("No Name Specified for {0}").format(doctype))
if name.startswith("New "+doctype):
frappe.throw(_("There were some errors setting the name, please contact the administrator"), frappe.NameError)
if case == "Title Case":
name = name.title()
if case == "UPPER CASE":
name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name != "DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
special_characters = "<>"
if re.findall("[{0}]+".format(special_characters), name):
message = ", ".join("'{0}'".format(c) for c in special_characters)
frappe.throw(_("Name cannot contain special characters like {0}").format(message), frappe.NameError)
return name
def append_number_if_name_exists(doctype, value, fieldname="name", separator="-", filters=None):
if not filters:
filters = dict()
filters.update({fieldname: value})
exists = frappe.db.exists(doctype, filters)
regex = "^{value}{separator}\\d+$".format(value=re.escape(value), separator=separator)
if exists:
last = frappe.db.sql("""SELECT `{fieldname}` FROM `tab{doctype}`
WHERE `{fieldname}` {regex_character} %s
ORDER BY length({fieldname}) DESC,
`{fieldname}` DESC LIMIT 1""".format(
doctype=doctype,
fieldname=fieldname,
regex_character=frappe.db.REGEX_CHARACTER),
regex)
if last:
count = str(cint(last[0][0].rsplit(separator, 1)[1]) + 1)
else:
count = "1"
value = "{0}{1}{2}".format(value, separator, count)
return value
def _get_amended_name(doc):
name, _ = NameParser(doc).parse_amended_from()
return name
def _field_autoname(autoname, doc, skip_slicing=None):
"""
Generate a name using `DocType` field. This is called when the doctype's
`autoname` field starts with 'field:'
"""
fieldname = autoname if skip_slicing else autoname[6:]
name = (cstr(doc.get(fieldname)) or "").strip()
return name
def _prompt_autoname(autoname, doc):
"""
Generate a name using Prompt option. This simply means the user will have to set the name manually.
This is called when the doctype's `autoname` field starts with 'prompt'.
"""
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via prompt"))
def _format_autoname(autoname, doc):
"""
Generate autoname by replacing all instances of braced params (fields, date params ('DD', 'MM', 'YY'), series)
Independent of remaining string or separators.
Example pattern: 'format:LOG-{MM}-{fieldname1}-{fieldname2}-{#####}'
"""
first_colon_index = autoname.find(":")
autoname_value = autoname[first_colon_index + 1:]
def get_param_value_for_match(match):
param = match.group()
# trim braces
trimmed_param = param[1:-1]
return parse_naming_series([trimmed_param], doc=doc)
# Replace braced params with their parsed value
name = re.sub(r"(\{[\w | #]+\})", get_param_value_for_match, autoname_value)
return name
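# Illustrative result (the series counter used for {#####} depends on existing documents):
#
#     _format_autoname("format:LOG-{MM}-{#####}", doc)   # -> e.g. "LOG-12-00001" in December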
class NameParser:
"""Parse document name and return parts of it.
	NOTE: It handles cancelled and amended doc parsing for now. It can be expanded.
"""
def __init__(self, doc):
self.doc = doc
def parse_amended_from(self):
"""
Cancelled document naming will be in one of these formats
* original_name-X-CANC - This is introduced to migrate old style naming to new style
* original_name-CANC - This is introduced to migrate old style naming to new style
* original_name-CANC-X - This is the new style naming
		New style naming: in the new style, amended documents keep the original name. That is,
		when a document gets cancelled we need to rename it by adding `-CANC-X` to the end
		so that amended documents can use the original name.
		Old style naming: cancelled documents keep the original name and, when amended, the amended
		one gets a new name of the form `original_name-X`. To introduce the new style naming we had
		to change the existing cancelled document names, which is done by adding `-CANC` to cancelled
		documents through a patch.
"""
if not getattr(self.doc, 'amended_from', None):
return (None, None)
# Handle old style cancelled documents (original_name-X-CANC, original_name-CANC)
if self.doc.amended_from.endswith('-CANC'):
name, _ = self.parse_docname(self.doc.amended_from, '-CANC')
amended_from_doc = frappe.get_all(
self.doc.doctype,
filters = {'name': self.doc.amended_from},
fields = ['amended_from'],
limit=1)
# Handle format original_name-X-CANC.
if amended_from_doc and amended_from_doc[0].amended_from:
return self.parse_docname(name, '-')
return name, None
# Handle new style cancelled documents
return self.parse_docname(self.doc.amended_from, '-CANC-')
@classmethod
def parse_docname(cls, name, sep='-'):
split_list = name.rsplit(sep, 1)
if len(split_list) == 1:
return (name, None)
return (split_list[0], split_list[1])
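# Examples (pure string parsing, no database access):
#
#     NameParser.parse_docname("SINV-0001-3", sep="-")            # -> ("SINV-0001", "3")
#     NameParser.parse_docname("SINV-0001-CANC-2", sep="-CANC-")  # -> ("SINV-0001", "2")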
def get_cancelled_doc_latest_counter(tname, docname):
"""Get the latest counter used for cancelled docs of given docname.
"""
name_prefix = f'{docname}-CANC-'
rows = frappe.db.sql("""
select
name
from `tab{tname}`
where
name like %(name_prefix)s and docstatus=2
""".format(tname=tname), {'name_prefix': name_prefix+'%'}, as_dict=1)
if not rows:
return -1
return max([int(row.name.replace(name_prefix, '') or -1) for row in rows])
def gen_new_name_for_cancelled_doc(doc):
"""Generate a new name for cancelled document.
"""
if getattr(doc, "amended_from", None):
name, _ = NameParser(doc).parse_amended_from()
else:
name = doc.name
counter = get_cancelled_doc_latest_counter(doc.doctype, name)
return f'{name}-CANC-{counter+1}'
| 30.962389
| 112
| 0.698464
|
6118c3788ac0663b5ca161cf224dcb49d41377c7
| 177
|
py
|
Python
|
examples/ps_rockstar.py
|
hoojaoh/rockstar
|
2cb911be76fc93692c180d629f0b282d672ea8f7
|
[
"MIT"
] | 4,603
|
2015-07-16T20:11:28.000Z
|
2022-03-21T23:51:47.000Z
|
examples/ps_rockstar.py
|
hoojaoh/rockstar
|
2cb911be76fc93692c180d629f0b282d672ea8f7
|
[
"MIT"
] | 90
|
2015-07-18T11:51:33.000Z
|
2021-05-10T02:45:58.000Z
|
examples/ps_rockstar.py
|
hoojaoh/rockstar
|
2cb911be76fc93692c180d629f0b282d672ea8f7
|
[
"MIT"
] | 436
|
2015-07-16T22:10:50.000Z
|
2022-02-15T04:53:19.000Z
|
from rockstar import RockStar
ps_code = "Write-Host 'Hello World!'"
rock_it_bro = RockStar(days=400, file_name='helloWorld.ps1', code=ps_code)
rock_it_bro.make_me_a_rockstar()
| 29.5
| 74
| 0.79096
|
a97099e252ea9249c4cf2dc169369400d3bcd75c
| 224
|
py
|
Python
|
unit_converter/app.py
|
clash402/unit-converter
|
12bfc741687328c88f2a04fc2dac469ff30ca50f
|
[
"MIT"
] | null | null | null |
unit_converter/app.py
|
clash402/unit-converter
|
12bfc741687328c88f2a04fc2dac469ff30ca50f
|
[
"MIT"
] | null | null | null |
unit_converter/app.py
|
clash402/unit-converter
|
12bfc741687328c88f2a04fc2dac469ff30ca50f
|
[
"MIT"
] | null | null | null |
from unit_converter.ui.ui import UI
from unit_converter.converter import Converter
class App:
def __init__(self):
self.ui = UI(Converter())
# PUBLIC METHODS
def start(self):
self.ui.mainloop()
| 18.666667
| 46
| 0.674107
|
c68e1220a592e203f1cf775fc8de466d12e42533
| 2,741
|
py
|
Python
|
setup_resnet.py
|
piotrek-k/nn_robust_attacks
|
3995e4daaa2fd913ad89528bcf06557ebf446b59
|
[
"BSD-2-Clause"
] | null | null | null |
setup_resnet.py
|
piotrek-k/nn_robust_attacks
|
3995e4daaa2fd913ad89528bcf06557ebf446b59
|
[
"BSD-2-Clause"
] | null | null | null |
setup_resnet.py
|
piotrek-k/nn_robust_attacks
|
3995e4daaa2fd913ad89528bcf06557ebf446b59
|
[
"BSD-2-Clause"
] | null | null | null |
# https://github.com/shoji9x9/CIFAR-10-By-small-ResNet/blob/master/ResNet-for-CIFAR-10-with-Keras.ipynb
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, Activation, MaxPool2D, GlobalAveragePooling2D, Add, Input, Flatten
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
cifar = tf.keras.datasets.cifar10
class RESNET_Model:
def __init__(self):
self.num_channels = 3
self.image_size = 32
self.num_labels = 10
self.model = None
def build_resnet(self):
n = 9 # 56 layers
channels = [16, 32, 64]
inputs = Input(shape=(32, 32, 3))
x = Conv2D(channels[0], kernel_size=(3, 3), padding="same", kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(inputs)
x = BatchNormalization()(x)
x = Activation(tf.nn.relu)(x)
for c in channels:
for i in range(n):
subsampling = i == 0 and c > 16
strides = (2, 2) if subsampling else (1, 1)
y = Conv2D(c, kernel_size=(3, 3), padding="same", strides=strides, kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(x)
y = BatchNormalization()(y)
y = Activation(tf.nn.relu)(y)
y = Conv2D(c, kernel_size=(3, 3), padding="same", kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(y)
y = BatchNormalization()(y)
if subsampling:
x = Conv2D(c, kernel_size=(1, 1), strides=(2, 2), padding="same", kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(x)
x = Add()([x, y])
x = Activation(tf.nn.relu)(x)
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
outputs = Dense(10, activation=tf.nn.softmax, kernel_initializer="he_normal")(x)
self.model = Model(inputs=inputs, outputs=outputs)
self.model.type = "resnet" + str(6 * n + 2)
def train(self):
(x_train, y_train), (x_test, y_test) = cifar.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
self.model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
self.model.fit(x_train, y_train, epochs=5)
self.model.evaluate(x_test, y_test)
resnet_model = RESNET_Model()
resnet_model.build_resnet()
resnet_model.model.summary()
resnet_model.train()
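# Optional follow-up using the standard Keras model API (the path is only an example):
#
#     resnet_model.model.save("resnet56_cifar10.h5")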
| 38.605634
| 137
| 0.601605
|
09bead1c803a3caa367bd7245aee16fd14cab1fd
| 414
|
py
|
Python
|
info/modules/index/views.py
|
xzp820017462/information27
|
b48010c7ff8f25da69e98621123a1408573a3cba
|
[
"MIT"
] | null | null | null |
info/modules/index/views.py
|
xzp820017462/information27
|
b48010c7ff8f25da69e98621123a1408573a3cba
|
[
"MIT"
] | null | null | null |
info/modules/index/views.py
|
xzp820017462/information27
|
b48010c7ff8f25da69e98621123a1408573a3cba
|
[
"MIT"
] | null | null | null |
from flask import current_app
from flask import render_template
from . import index_blu
from info import redis_store
@index_blu.route('/')
def index():
#redis_store.set("name","itheima")
#redis_store.set("name","itcast")
return render_template('news/index.html')
#在打开网页的时候.会默认请求根路径favicon.ico
@index_blu.route('/favicon.ico')
def favicon():
return current_app.send_static_file('news/favicon.ico')
| 27.6
| 59
| 0.753623
|
24afc57ff1e5dcf7eea37be1c0bdfac4419ec2cf
| 1,910
|
py
|
Python
|
python/logger.py
|
Xanonymous-GitHub/main
|
53120110bd8dc9ab33424fa26d1a8ca5b9256ebe
|
[
"Apache-2.0"
] | 1
|
2019-09-27T17:46:41.000Z
|
2019-09-27T17:46:41.000Z
|
python/logger.py
|
Xanonymous-GitHub/main
|
53120110bd8dc9ab33424fa26d1a8ca5b9256ebe
|
[
"Apache-2.0"
] | null | null | null |
python/logger.py
|
Xanonymous-GitHub/main
|
53120110bd8dc9ab33424fa26d1a8ca5b9256ebe
|
[
"Apache-2.0"
] | 5
|
2019-09-30T16:41:14.000Z
|
2019-10-25T11:13:39.000Z
|
import pythoncom
import pyHook
import time
'''
def onMouseEvent(event):
"處理鼠標事件"
fobj.writelines('-' * 20 + 'MouseEvent Begin' + '-' * 20 + '\n')
fobj.writelines("Current Time:%s\n" % time.strftime("%a, %d %b %Y %H:%M:%S ", time.gmtime()))
fobj.writelines("MessageName:%s\n" % str(event.MessageName))
fobj.writelines("Message:%d\n" % event.Message)
fobj.writelines( "Time_sec:%d\n" % event.Time)
fobj.writelines("Window:%s\n" % str(event.Window))
fobj.writelines("WindowName:%s\n" % str(event. WindowName))
fobj.writelines("Position:%s\n" % str(event.Position))
fobj.writelines('-' * 20 + 'MouseEvent End' + '-' * 20 + '\n')
return True
'''
def onKeyboardEvent(event):
"處理鍵盤事件"
fobj.writelines('-' * 20 + ' Keyboard Begin' + '-' * 20 + '\n')
fobj.writelines("Current Time:%s\n" % time.strftime(
"%a, %d %b %Y %H:%M:% S", time.gmtime()))
fobj.writelines("MessageName:%s\n" % str(event.MessageName))
fobj.writelines("Message:%d\n" % event.Message)
fobj.writelines("Time:%d\n" % event.Time)
fobj.writelines("Window:%s\n" % str(event.Window))
fobj.writelines("WindowName:%s\n" % str(event .WindowName))
fobj.writelines("Ascii_code: %d\n" % event.Ascii)
fobj.writelines("Ascii_char:%s\n" % chr(event.Ascii))
fobj.writelines("Key:%s\n" % str(event.Key))
fobj.writelines('-' * 20 + 'Keyboard End' + '-' * 20 + '\n')
return True
if __name__ == "__main__":
    # Open the log file
file_name = "D:\Xanonymous\Downloads\hook_log.txt"
fobj = open(file_name, 'w')
    # Create the hook manager handle
hm = pyHook.HookManager()
    # Monitor the keyboard
hm.KeyDown = onKeyboardEvent
hm.HookKeyboard()
'''
    # Monitor the mouse
hm.MouseAll = onMouseEvent
hm.HookMouse()
'''
    # Loop to pump messages
pythoncom.PumpMessages()
    # Close the log file
fobj.close()
| 31.311475
| 99
| 0.581675
|
2bbbb9e2cb352d9f22fffb931cea8840e73ad57a
| 3,997
|
py
|
Python
|
tests/test_stats.py
|
FingerCrunch/scrapy
|
3225de725720bba246ba8c9845fe4b84bc0c82e7
|
[
"BSD-3-Clause"
] | 41,267
|
2015-01-01T07:39:25.000Z
|
2022-03-31T20:09:40.000Z
|
tests/test_stats.py
|
FingerCrunch/scrapy
|
3225de725720bba246ba8c9845fe4b84bc0c82e7
|
[
"BSD-3-Clause"
] | 4,420
|
2015-01-02T09:35:38.000Z
|
2022-03-31T22:53:32.000Z
|
tests/test_stats.py
|
FingerCrunch/scrapy
|
3225de725720bba246ba8c9845fe4b84bc0c82e7
|
[
"BSD-3-Clause"
] | 11,080
|
2015-01-01T18:11:30.000Z
|
2022-03-31T15:33:19.000Z
|
from datetime import datetime
import unittest
from unittest import mock
from scrapy.extensions.corestats import CoreStats
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector, DummyStatsCollector
from scrapy.utils.test import get_crawler
class CoreStatsExtensionTest(unittest.TestCase):
def setUp(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('foo')
@mock.patch('scrapy.extensions.corestats.datetime')
def test_core_stats_default_stats_collector(self, mock_datetime):
fixed_datetime = datetime(2019, 12, 1, 11, 38)
mock_datetime.utcnow = mock.Mock(return_value=fixed_datetime)
self.crawler.stats = StatsCollector(self.crawler)
ext = CoreStats.from_crawler(self.crawler)
ext.spider_opened(self.spider)
ext.item_scraped({}, self.spider)
ext.response_received(self.spider)
ext.item_dropped({}, self.spider, ZeroDivisionError())
ext.spider_closed(self.spider, 'finished')
self.assertEqual(
ext.stats._stats,
{
'start_time': fixed_datetime,
'finish_time': fixed_datetime,
'item_scraped_count': 1,
'response_received_count': 1,
'item_dropped_count': 1,
'item_dropped_reasons_count/ZeroDivisionError': 1,
'finish_reason': 'finished',
'elapsed_time_seconds': 0.0,
}
)
def test_core_stats_dummy_stats_collector(self):
self.crawler.stats = DummyStatsCollector(self.crawler)
ext = CoreStats.from_crawler(self.crawler)
ext.spider_opened(self.spider)
ext.item_scraped({}, self.spider)
ext.response_received(self.spider)
ext.item_dropped({}, self.spider, ZeroDivisionError())
ext.spider_closed(self.spider, 'finished')
self.assertEqual(ext.stats._stats, {})
class StatsCollectorTest(unittest.TestCase):
def setUp(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('foo')
def test_collector(self):
stats = StatsCollector(self.crawler)
self.assertEqual(stats.get_stats(), {})
self.assertEqual(stats.get_value('anything'), None)
self.assertEqual(stats.get_value('anything', 'default'), 'default')
stats.set_value('test', 'value')
self.assertEqual(stats.get_stats(), {'test': 'value'})
stats.set_value('test2', 23)
self.assertEqual(stats.get_stats(), {'test': 'value', 'test2': 23})
self.assertEqual(stats.get_value('test2'), 23)
stats.inc_value('test2')
self.assertEqual(stats.get_value('test2'), 24)
stats.inc_value('test2', 6)
self.assertEqual(stats.get_value('test2'), 30)
stats.max_value('test2', 6)
self.assertEqual(stats.get_value('test2'), 30)
stats.max_value('test2', 40)
self.assertEqual(stats.get_value('test2'), 40)
stats.max_value('test3', 1)
self.assertEqual(stats.get_value('test3'), 1)
stats.min_value('test2', 60)
self.assertEqual(stats.get_value('test2'), 40)
stats.min_value('test2', 35)
self.assertEqual(stats.get_value('test2'), 35)
stats.min_value('test4', 7)
self.assertEqual(stats.get_value('test4'), 7)
def test_dummy_collector(self):
stats = DummyStatsCollector(self.crawler)
self.assertEqual(stats.get_stats(), {})
self.assertEqual(stats.get_value('anything'), None)
self.assertEqual(stats.get_value('anything', 'default'), 'default')
stats.set_value('test', 'value')
stats.inc_value('v1')
stats.max_value('v2', 100)
stats.min_value('v3', 100)
stats.open_spider('a')
stats.set_value('test', 'value', spider=self.spider)
self.assertEqual(stats.get_stats(), {})
self.assertEqual(stats.get_stats('a'), {})
| 40.373737
| 75
| 0.647736
|
331eba5df083c980430ad023f100803cd734e285
| 541
|
py
|
Python
|
Chapter05-old/Exercise5_12/Exercise5_12.py
|
PacktWorkshops/The-Spark-Workshop
|
f5b052b67d3aaf805eb48c9958c0dcfd237cc841
|
[
"MIT"
] | 7
|
2019-11-11T13:17:04.000Z
|
2021-01-18T22:09:44.000Z
|
Chapter05-old/Exercise5_12/Exercise5_12.py
|
PacktWorkshops/The-Spark-Workshop
|
f5b052b67d3aaf805eb48c9958c0dcfd237cc841
|
[
"MIT"
] | null | null | null |
Chapter05-old/Exercise5_12/Exercise5_12.py
|
PacktWorkshops/The-Spark-Workshop
|
f5b052b67d3aaf805eb48c9958c0dcfd237cc841
|
[
"MIT"
] | 12
|
2020-04-20T16:23:51.000Z
|
2021-07-07T20:37:45.000Z
|
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("exercise_twelve") \
.getOrCreate()
csv_pyspark_df1 = spark.read.csv("hdfs://user/your_user_name/data/csv/userdata1.csv"
, sep=","
, inferSchema="true"
, header="true")
csv_pyspark_df2 = spark.read.format("csv").load("hdfs://user/your_user_name/data/csv/userdata1.csv"
, sep=","
, inferSchema="true"
, header="true")
csv_pyspark_df3 = spark.read.load("hdfs://user/your_user_name/data/csv/userdata1.csv"
, format="csv"
, sep=","
, inferSchema="true"
, header="true")
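# Quick sanity check (a sketch, not part of the original exercise): the three
# readers above are equivalent, so, assuming the HDFS path is reachable, they
# should agree on schema and row count.
csv_pyspark_df1.printSchema()
assert csv_pyspark_df1.count() == csv_pyspark_df2.count() == csv_pyspark_df3.count()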
| 25.761905
| 99
| 0.674677
|
8754dea24ffb22ca62882b7fc2dd04f44630168b
| 645
|
py
|
Python
|
epidemic_model/sir.py
|
innovator-zero/Python
|
f776eb081c6688c2f5a98b0050b33582c1769391
|
[
"Apache-2.0"
] | null | null | null |
epidemic_model/sir.py
|
innovator-zero/Python
|
f776eb081c6688c2f5a98b0050b33582c1769391
|
[
"Apache-2.0"
] | 3
|
2020-03-09T03:40:53.000Z
|
2020-11-12T15:36:03.000Z
|
epidemic_model/sir.py
|
innovator-zero/Python
|
f776eb081c6688c2f5a98b0050b33582c1769391
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
N = 1e7
T = 70
s = np.zeros([T])
i = np.zeros([T])
r = np.zeros([T])
lamda = 1.0
gamma = 0.5
i[0] = 45.0 / N
s[0] = 1 - i[0]
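# Discrete-time SIR update (one step per day):
#   s[t+1] = s[t] - lamda * s[t] * i[t]                 (new infections leave S)
#   i[t+1] = i[t] + lamda * s[t] * i[t] - gamma * i[t]
#   r[t+1] = r[t] + gamma * i[t]                        (recoveries leave I)
# lamda is the transmission rate, gamma the recovery rate; s + i + r stays 1.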
for t in range(T - 1):
i[t + 1] = i[t] + i[t] * lamda * s[t] - gamma * i[t]
s[t + 1] = s[t] - lamda * s[t] * i[t]
r[t + 1] = r[t] + gamma * i[t]
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(s, c='b', lw=2, label='S')
ax.plot(i, c='r', lw=2, label='I')
ax.plot(r, c='g', lw=2, label='R')
ax.set_xlabel('Day', fontsize=20)
ax.set_ylabel('Infective Ratio', fontsize=20)
ax.grid(1)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend()
plt.show()
| 20.15625
| 56
| 0.56124
|
4bd91695f59fe3e20d9f882be9f1f9ae627024a4
| 5,413
|
py
|
Python
|
ivy/functional/backends/torch/linear_algebra.py
|
uchennayah/ivy-1
|
ec422d1f135db54f64df444ed32186e8a31c4599
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/torch/linear_algebra.py
|
uchennayah/ivy-1
|
ec422d1f135db54f64df444ed32186e8a31c4599
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/torch/linear_algebra.py
|
uchennayah/ivy-1
|
ec422d1f135db54f64df444ed32186e8a31c4599
|
[
"Apache-2.0"
] | null | null | null |
# global
import torch
from typing import Union, Optional, Tuple, Literal, List
from collections import namedtuple
# local
import ivy as _ivy
from ivy import inf
# Array API Standard #
# -------------------#
def eigh(x: torch.Tensor)\
->torch.Tensor:
return torch.linalg.eigh(x)
def inv(x):
return torch.inverse(x)
def pinv(x: torch.Tensor,
rtol: Optional[Union[float, Tuple[float]]] = None) \
-> torch.Tensor:
if rtol is None:
return torch.linalg.pinv(x)
return torch.linalg.pinv(x, rtol)
def matrix_transpose(x: torch.Tensor)\
-> torch.Tensor:
return torch.swapaxes(x, -1, -2)
def vector_norm(x: torch.Tensor,
p: Union[int, float, Literal[inf, - inf]] = 2,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False)\
-> torch.Tensor:
py_normalized_vector = torch.linalg.vector_norm(x, p, axis, keepdims)
if py_normalized_vector.shape == ():
return torch.unsqueeze(py_normalized_vector, 0)
return py_normalized_vector
def matrix_norm(x, p=2, axes=None, keepdims=False):
axes = [-2, -1] if axes is None else axes
if isinstance(axes, int):
        raise Exception('if specified, axes must be a length-2 sequence of ints, '
                        'but found {} of type {}'.format(axes, type(axes)))
ret = torch.linalg.matrix_norm(x, ord=p, dim=axes, keepdim=keepdims)
if ret.shape == ():
return torch.unsqueeze(ret, 0)
return ret
# noinspection PyPep8Naming
def svd(x: torch.Tensor, full_matrices: bool = True) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
    results = namedtuple("svd", "U S Vh")
    U, D, VT = torch.linalg.svd(x, full_matrices=full_matrices)
    res = results(U, D, VT)
    return res
def outer(x1: torch.Tensor,
x2: torch.Tensor)\
-> torch.Tensor:
return torch.outer(x1, x2)
def diagonal(x: torch.Tensor,
offset: int = 0,
axis1: int = -2,
axis2: int = -1) -> torch.Tensor:
return torch.diagonal(x, offset=offset, dim1=axis1, dim2=axis2)
def svdvals(x: torch.Tensor) -> torch.Tensor:
return torch.linalg.svdvals(x)
def qr(x: torch.Tensor,
mode: str = 'reduced') -> namedtuple('qr', ['Q', 'R']):
res = namedtuple('qr', ['Q', 'R'])
if mode == 'reduced':
q, r = torch.qr(x, some=True)
return res(q, r)
elif mode == 'complete':
q, r = torch.qr(x, some=False)
return res(q, r)
else:
raise Exception("Only 'reduced' and 'complete' qr modes are allowed for the torch backend.")
def matmul(x1: torch.Tensor,
x2: torch.Tensor) -> torch.Tensor:
dtype_from = torch.promote_types(x1.dtype, x2.dtype)
x1 = x1.type(dtype_from)
x2 = x2.type(dtype_from)
ret = torch.matmul(x1, x2)
return ret.type(dtype_from)
def slogdet(x:Union[_ivy.Array,_ivy.NativeArray],full_matrices: bool = True) -> Union[_ivy.Array, Tuple[_ivy.Array,...]]:
results = namedtuple("slogdet", "sign logabsdet")
sign, logabsdet = torch.linalg.slogdet(x)
res = results(sign, logabsdet)
return res
def tensordot(x1: torch.Tensor, x2: torch.Tensor,
axes: Union[int, Tuple[List[int], List[int]]] = 2) \
-> torch.Tensor:
# find the type to promote to
dtype = torch.promote_types(x1.dtype, x2.dtype)
# type conversion to one that torch.tensordot can work with
x1, x2 = x1.type(torch.float32), x2.type(torch.float32)
# handle tensordot for axes==0
# otherwise call with axes
if axes == 0:
return (x1.reshape(x1.size() + (1,) * x2.dim()) * x2).type(dtype)
return torch.tensordot(x1, x2, dims=axes).type(dtype)
def trace(x: torch.Tensor,
offset: int = 0)\
-> torch.Tensor:
return torch.trace(x, offset)
def det(A:torch.Tensor) \
-> torch.Tensor:
return torch.linalg.det(A)
def cholesky(x: torch.Tensor,
upper: bool = False) -> torch.Tensor:
if not upper:
return torch.linalg.cholesky(x)
else:
return torch.transpose(torch.linalg.cholesky(torch.transpose(x, dim0=len(x.shape) - 1,dim1=len(x.shape) - 2)),
dim0=len(x.shape) - 1, dim1=len(x.shape) - 2)
def eigvalsh(x: torch.Tensor) -> torch.Tensor:
return torch.linalg.eigvalsh(x)
def cross(x1: torch.Tensor,
          x2: torch.Tensor,
          axis: int = -1) -> torch.Tensor:
    if axis is None:
        axis = -1
dtype_from = torch.promote_types(x1.dtype, x2.dtype)
x1 = x1.type(dtype_from)
x2 = x2.type(dtype_from)
return torch.cross(input = x1, other = x2, dim=axis)
# Extra #
# ------#
def vector_to_skew_symmetric_matrix(vector: torch.Tensor)\
-> torch.Tensor:
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = torch.unsqueeze(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = torch.zeros(batch_shape + [1, 1], device=vector.device)
# BS x 1 x 3
row1 = torch.cat((zs, -a3s, a2s), -1)
row2 = torch.cat((a3s, zs, -a1s), -1)
row3 = torch.cat((-a2s, a1s, zs), -1)
# BS x 3 x 3
return torch.cat((row1, row2, row3), -2)
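# Minimal usage sketch (illustrative only, not part of the backend): for a
# 3-vector `a`, the skew-symmetric matrix reproduces the cross product, i.e.
# vector_to_skew_symmetric_matrix(a) @ b equals cross(a, b).
if __name__ == "__main__":
    a = torch.tensor([1.0, 2.0, 3.0])
    b = torch.tensor([4.0, 5.0, 6.0])
    assert torch.allclose(vector_to_skew_symmetric_matrix(a) @ b, cross(a, b))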
| 28.640212
| 121
| 0.608904
|
d99b09841f846c7730a7e10de99bf01edcfa0c0e
| 5,208
|
py
|
Python
|
qiskit_nature/results/eigenstate_result.py
|
jschuhmac/qiskit-nature
|
b8b1181d951cf8fa76fe0db9e5ea192dad5fb186
|
[
"Apache-2.0"
] | 132
|
2021-01-28T14:51:11.000Z
|
2022-03-25T21:10:47.000Z
|
qiskit_nature/results/eigenstate_result.py
|
jschuhmac/qiskit-nature
|
b8b1181d951cf8fa76fe0db9e5ea192dad5fb186
|
[
"Apache-2.0"
] | 449
|
2021-01-28T19:57:43.000Z
|
2022-03-31T17:01:50.000Z
|
qiskit_nature/results/eigenstate_result.py
|
jschuhmac/qiskit-nature
|
b8b1181d951cf8fa76fe0db9e5ea192dad5fb186
|
[
"Apache-2.0"
] | 109
|
2021-01-28T13:17:46.000Z
|
2022-03-30T23:53:39.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Eigenstate results module."""
from typing import Optional, List, Tuple, Union
import inspect
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Instruction
from qiskit.quantum_info import Statevector
from qiskit.result import Result
from qiskit.algorithms import AlgorithmResult
from qiskit.opflow import OperatorBase
from qiskit_nature import ListOrDictType
class EigenstateResult(AlgorithmResult):
"""The eigenstate result interface."""
def __init__(self) -> None:
super().__init__()
self._eigenenergies: Optional[np.ndarray] = None
self._eigenstates: Optional[
List[
Union[
str,
dict,
Result,
list,
np.ndarray,
Statevector,
QuantumCircuit,
Instruction,
OperatorBase,
]
]
] = None
self._aux_operator_eigenvalues: Optional[
List[ListOrDictType[Tuple[complex, complex]]]
] = None
self._raw_result: Optional[AlgorithmResult] = None
@property
def eigenenergies(self) -> Optional[np.ndarray]:
"""returns eigen energies"""
return self._eigenenergies
@eigenenergies.setter
def eigenenergies(self, value: np.ndarray) -> None:
"""set eigen energies"""
self._eigenenergies = value
@property
def eigenstates(
self,
) -> Optional[
List[
Union[
str,
dict,
Result,
list,
np.ndarray,
Statevector,
QuantumCircuit,
Instruction,
OperatorBase,
]
]
]:
"""returns eigen states"""
return self._eigenstates
@eigenstates.setter
def eigenstates(
self,
value: List[
Union[
str,
dict,
Result,
list,
np.ndarray,
Statevector,
QuantumCircuit,
Instruction,
OperatorBase,
]
],
) -> None:
"""set eigen states"""
self._eigenstates = value
@property
def groundenergy(self) -> Optional[float]:
"""returns ground energy"""
energies = self.eigenenergies
        if isinstance(energies, np.ndarray) and energies.size:
return energies[0].real
return None
@property
def groundstate(
self,
) -> Optional[
Union[
str,
dict,
Result,
list,
np.ndarray,
Statevector,
QuantumCircuit,
Instruction,
OperatorBase,
]
]:
"""returns ground state"""
states = self.eigenstates
if states:
return states[0]
return None
@property
def aux_operator_eigenvalues(self) -> Optional[List[ListOrDictType[Tuple[complex, complex]]]]:
"""return aux operator eigen values"""
return self._aux_operator_eigenvalues
@aux_operator_eigenvalues.setter
def aux_operator_eigenvalues(
self, value: List[ListOrDictType[Tuple[complex, complex]]]
) -> None:
"""set aux operator eigen values"""
self._aux_operator_eigenvalues = value
@property
def raw_result(self) -> Optional[AlgorithmResult]:
"""Returns the raw algorithm result."""
return self._raw_result
@raw_result.setter
def raw_result(self, result: AlgorithmResult) -> None:
self._raw_result = result
def combine(self, result: AlgorithmResult) -> None:
"""
Any property from the argument that exists in the receiver is
updated.
Args:
result: Argument result with properties to be set.
Raises:
TypeError: Argument is None
"""
if result is None:
raise TypeError("Argument result expected.")
if result == self:
return
# find any result public property that exists in the receiver
for name, value in inspect.getmembers(result):
if (
not name.startswith("_")
and not inspect.ismethod(value)
and not inspect.isfunction(value)
and hasattr(self, name)
):
try:
setattr(self, name, value)
except AttributeError:
# some attributes may be read only
pass
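# Minimal usage sketch (illustrative only, not part of the Qiskit Nature API):
# populate one result and merge it into another via combine().
if __name__ == "__main__":
    src = EigenstateResult()
    src.eigenenergies = np.asarray([-1.0 + 0j, -0.5 + 0j])
    dst = EigenstateResult()
    dst.combine(src)
    print(dst.groundenergy)  # -1.0, the real part of the lowest eigenenergy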
| 28.459016
| 98
| 0.5553
|
6a349dc74cf94f513b8fba69e81e122f8967f2d5
| 1,059
|
py
|
Python
|
microsoft_atp/komand_microsoft_atp/triggers/get_alert_matching_key/trigger.py
|
emartin-merrill-r7/insightconnect-plugins
|
a589745dbcc9f01d3e601431e77ab7221a84c117
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
microsoft_atp/komand_microsoft_atp/triggers/get_alert_matching_key/trigger.py
|
OSSSP/insightconnect-plugins
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
[
"MIT"
] | null | null | null |
microsoft_atp/komand_microsoft_atp/triggers/get_alert_matching_key/trigger.py
|
OSSSP/insightconnect-plugins
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
[
"MIT"
] | null | null | null |
import komand
import time
from .schema import GetAlertMatchingKeyInput, GetAlertMatchingKeyOutput
# Custom imports below
class GetAlertMatchingKey(komand.Trigger):
def __init__(self):
super(self.__class__, self).__init__(
name='get_alert_matching_key',
description='Get alerts that match a given key to its value',
input=GetAlertMatchingKeyInput(),
output=GetAlertMatchingKeyOutput())
def run(self, params={}):
"""Run the trigger"""
while True:
key = params["key"]
value = params["value"]
matching_alerts = self.connection.get_alerts_by_key_value(key, value)
if(len(matching_alerts) > 0):
self.send({"results": matching_alerts})
time.sleep(params.get("interval", 5))
def test(self):
# This will raise an exception for failure cases
self.connection.test()
# No exception raised, pass back json
return {"results": self.connection.fake_alert()}
| 30.257143
| 81
| 0.622285
|
e1ec38f0d1745b6539e47d891d6a2a9014d33393
| 17,324
|
py
|
Python
|
lib/alchemy/__init__.py
|
nachogentile/PatentsProcessor
|
43ae7a727d3368811c828eeca6d6e484af8cb2f6
|
[
"BSD-2-Clause"
] | 14
|
2015-09-22T12:58:50.000Z
|
2020-07-22T14:05:41.000Z
|
lib/alchemy/__init__.py
|
nachogentile/PatentsProcessor
|
43ae7a727d3368811c828eeca6d6e484af8cb2f6
|
[
"BSD-2-Clause"
] | 1
|
2016-05-31T18:25:32.000Z
|
2016-05-31T18:25:32.000Z
|
lib/alchemy/__init__.py
|
nachogentile/PatentsProcessor
|
43ae7a727d3368811c828eeca6d6e484af8cb2f6
|
[
"BSD-2-Clause"
] | 16
|
2015-09-30T03:24:43.000Z
|
2020-07-30T03:00:16.000Z
|
"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro gt.fierro@berkeley.edu github.com/gtfierro
"""
"""
Helper functions for database-related functionality.
"""
import os
import re
import ConfigParser
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql import exists
from collections import defaultdict
import schema
from match import *
import uuid
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
from HTMLParser import HTMLParser
h = HTMLParser()
def unescape_html(x):
return h.unescape(x)
import htmlentitydefs
_char = re.compile(r'&(\w+?);')
# Generate some extra HTML entities
defs=htmlentitydefs.entitydefs
defs['apos'] = "'"
entities = open('htmlentities').read().split('\n')
for e in entities:
try:
first = re.sub('\s+|\"|;|&','',e[3:15])
second = re.sub('\s+|\"|;|&','',e[15:24])
define = re.search("(?<=\s\s\').*?$",e).group()
defs[first] = define[:-1].encode('utf-8')
defs[second] = define[:-1].encode('utf-8')
except:
pass
def _char_unescape(m, defs=defs):
try:
return defs[m.group(1)].encode('utf-8','ignore')
except:
return m.group()
def fixid(x):
if 'id' in x:
x['id'] = str(uuid.uuid4())
elif 'uuid' in x:
x['uuid'] = str(uuid.uuid4())
return x
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
"""
This keeps the database connection alive over long-running processes (like assignee and location disambiguations)
"""
cursor = dbapi_connection.cursor()
if not hasattr(cursor, 'MySQLError'):
return
try:
# reset the connection settings
cursor.execute("SELECT 1;")
if is_mysql():
cursor.execute("set foreign_key_checks = 0; set unique_checks = 0;")
except:
# raise DisconnectionError - pool will try
# connecting again up to three times before raising.
raise exc.DisconnectionError()
cursor.close()
def is_mysql():
"""
Returns True if currently connected to a MySQL database. Given that our only two options
    are MySQL and SQLite, we use this function to determine when we can use certain functions
    like `set foreign_key_checks = 0` and `truncate <tablename>`.
"""
config = get_config()
return config.get('global').get('database') == 'mysql'
def get_config(localfile="config.ini", default_file=True):
"""
This grabs a configuration file and converts it into
a dictionary.
The default filename is called config.ini
First we load the global file, then we load a local file
"""
if default_file:
openfile = "{0}/config.ini".format(os.path.dirname(os.path.realpath(__file__)))
else:
openfile = localfile
config = defaultdict(dict)
if os.path.isfile(openfile):
cfg = ConfigParser.ConfigParser()
cfg.read(openfile)
for s in cfg.sections():
for k, v in cfg.items(s):
dec = re.compile(r'^\d+(\.\d+)?$')
if v in ("True", "False") or v.isdigit() or dec.match(v):
v = eval(v)
config[s][k] = v
# this enables us to load a local file
if default_file:
newconfig = get_config(localfile, default_file=False)
for section in newconfig:
for item in newconfig[section]:
config[section][item] = newconfig[section][item]
return config
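# Illustrative config.ini layout (a sketch inferred from the keys read above;
# the values shown are placeholders, not project defaults):
#
#   [global]
#   database = mysql
#   echo = False
#
#   [mysql]
#   user = patents
#   password = secret
#   host = localhost
#   grant-database = grants
#   application-database = applications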
def session_generator(db=None, dbtype='grant'):
"""
Read from config.ini file and load appropriate database
@db: string describing database, e.g. "sqlite" or "mysql"
@dbtype: string indicating if we are fetching the session for
the grant database or the application database
    session_generator will return an object that can be called
to retrieve more sessions, e.g.
sg = session_generator(dbtype='grant')
session1 = sg()
session2 = sg()
etc.
These sessions will be protected with the ping refresher above
"""
config = get_config()
echo = config.get('global').get('echo')
if not db:
db = config.get('global').get('database')
if db[:6] == "sqlite":
sqlite_db_path = os.path.join(
config.get(db).get('path'),
config.get(db).get('{0}-database'.format(dbtype)))
if os.path.basename(os.getcwd()) == 'lib':
sqlite_db_path = '../' + sqlite_db_path
engine = create_engine('sqlite:///{0}'.format(sqlite_db_path), echo=echo, echo_pool=True)
else:
engine = create_engine('mysql+mysqldb://{0}:{1}@{2}/{3}?charset=utf8'.format(
config.get(db).get('user'),
config.get(db).get('password'),
config.get(db).get('host'),
config.get(db).get('{0}-database'.format(dbtype)), echo=echo), pool_size=3, pool_recycle=3600, echo_pool=True)
if dbtype == 'grant':
schema.GrantBase.metadata.create_all(engine)
else:
schema.ApplicationBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine, _enable_transaction_accounting=False)
return scoped_session(Session)
def fetch_session(db=None, dbtype='grant'):
"""
Read from config.ini file and load appropriate database
@db: string describing database, e.g. "sqlite" or "mysql"
@dbtype: string indicating if we are fetching the session for
the grant database or the application database
"""
config = get_config()
echo = config.get('global').get('echo')
if not db:
db = config.get('global').get('database')
if db[:6] == "sqlite":
sqlite_db_path = os.path.join(
config.get(db).get('path'),
config.get(db).get('{0}-database'.format(dbtype)))
engine = create_engine('sqlite:///{0}'.format(sqlite_db_path), echo=echo)
else:
engine = create_engine('mysql+mysqldb://{0}:{1}@{2}/{3}?charset=utf8'.format(
config.get(db).get('user'),
config.get(db).get('password'),
config.get(db).get('host'),
config.get(db).get('{0}-database'.format(dbtype)), echo=echo))
if dbtype == 'grant':
schema.GrantBase.metadata.create_all(engine)
else:
schema.ApplicationBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine, _enable_transaction_accounting=False)
session = Session()
return session
def add_grant(obj, override=True, temp=False):
"""
PatentGrant Object converting to tables via SQLAlchemy
Necessary to convert dates to datetime because of SQLite (OK on MySQL)
Case Sensitivity and Table Reflection
MySQL has inconsistent support for case-sensitive identifier names,
basing support on specific details of the underlying operating system.
However, it has been observed that no matter what case sensitivity
behavior is present, the names of tables in foreign key declarations
are always received from the database as all-lower case, making it
impossible to accurately reflect a schema where inter-related tables
use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as all
lower case both within SQLAlchemy as well as on the MySQL database
itself, especially if database reflection features are to be used.
"""
# if a patent exists, remove it so we can replace it
(patent_exists, ), = grantsession.query(exists().where(schema.Patent.number == obj.patent))
#pat_query = grantsession.query(Patent).filter(Patent.number == obj.patent)
#if pat_query.count():
if patent_exists:
if override:
pat_query = grantsession.query(schema.Patent).filter(schema.Patent.id == obj.patent)
grantsession.delete(pat_query.one())
else:
return
if len(obj.pat["number"]) < 3:
return
pat = schema.Patent(**obj.pat)
pat.application = schema.Application(**obj.app)
# lots of abstracts seem to be missing. why?
add_all_fields(obj, pat)
if is_mysql():
grantsession.execute('set foreign_key_checks = 0;')
grantsession.execute('set unique_checks = 0;')
#grantsession.commit()
grantsession.merge(pat)
def add_all_fields(obj, pat):
add_asg(obj, pat)
add_inv(obj, pat)
add_law(obj, pat)
add_usreldoc(obj, pat)
add_classes(obj, pat)
add_ipcr(obj, pat)
add_citations(obj, pat)
add_claims(obj, pat)
add_current_classes(obj, pat)
def add_asg(obj, pat):
for asg, loc in obj.assignee_list:
asg = fixid(asg)
asg['organization'] = unescape_html(asg['organization'])
loc = fixid(loc)
asg = schema.RawAssignee(**asg)
loc = schema.RawLocation(**loc)
grantsession.merge(loc)
asg.rawlocation = loc
pat.rawassignees.append(asg)
def add_inv(obj, pat):
for inv, loc in obj.inventor_list:
inv = fixid(inv)
loc = fixid(loc)
inv = schema.RawInventor(**inv)
loc = schema.RawLocation(**loc)
grantsession.merge(loc)
inv.rawlocation = loc
pat.rawinventors.append(inv)
def add_law(obj, pat):
for law in obj.lawyer_list:
law = fixid(law)
law = schema.RawLawyer(**law)
pat.rawlawyers.append(law)
def add_usreldoc(obj, pat):
for usr in obj.us_relation_list:
usr = fixid(usr)
usr["rel_id"] = usr["number"]
usr = schema.USRelDoc(**usr)
pat.usreldocs.append(usr)
def add_classes(obj, pat):
for uspc, mc, sc in obj.us_classifications:
uspc = fixid(uspc)
uspc = schema.USPC(**uspc)
mc = schema.MainClass(**mc)
sc = schema.SubClass(**sc)
grantsession.merge(mc)
grantsession.merge(sc)
uspc.mainclass = mc
uspc.subclass = sc
pat.classes.append(uspc)
def add_current_classes(obj, pat):
for uspc_current, mc, sc in obj.us_classifications:
uspc_current = fixid(uspc_current)
uspc_current = schema.USPC_current(**uspc_current)
mc = schema.MainClass_current(**mc)
sc = schema.SubClass_current(**sc)
grantsession.merge(mc)
grantsession.merge(sc)
uspc_current.mainclass_current = mc
uspc_current.subclass_current = sc
pat.current_classes.append(uspc_current)
def add_ipcr(obj, pat):
for ipc in obj.ipcr_classifications:
ipc = schema.IPCR(**ipc)
pat.ipcrs.append(ipc)
def add_citations(obj, pat):
cits, refs = obj.citation_list
for cit in cits:
if cit['country'] == 'US':
# granted patent doc number
if re.match(r'^[A-Z]*\d+$', cit['number']):
cit['citation_id'] = cit['number']
cit = fixid(cit)
cit = schema.USPatentCitation(**cit)
pat.uspatentcitations.append(cit)
# if not above, it's probably an application
else:
cit['application_id'] = cit['number']
cit = fixid(cit)
cit = schema.USApplicationCitation(**cit)
pat.usapplicationcitations.append(cit)
# if not US, then foreign citation
else:
cit = fixid(cit)
cit = schema.ForeignCitation(**cit)
pat.foreigncitations.append(cit)
for ref in refs:
ref = fixid(ref)
ref = schema.OtherReference(**ref)
pat.otherreferences.append(ref)
def add_claims(obj, pat):
claims = obj.claims
for claim in claims:
claim = fixid(claim)
claim['text'] = unescape_html(claim['text'])
claim['text'] = _char.sub(_char_unescape,claim['text'])
clm = schema.Claim(**claim)
pat.claims.append(clm)
def commit():
try:
grantsession.commit()
except Exception, e:
grantsession.rollback()
print str(e)
def add_application(obj, override=True, temp=False):
"""
PatentApplication Object converting to tables via SQLAlchemy
Necessary to convert dates to datetime because of SQLite (OK on MySQL)
Case Sensitivity and Table Reflection
MySQL has inconsistent support for case-sensitive identifier names,
basing support on specific details of the underlying operating system.
However, it has been observed that no matter what case sensitivity
behavior is present, the names of tables in foreign key declarations
are always received from the database as all-lower case, making it
impossible to accurately reflect a schema where inter-related tables
use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as all
lower case both within SQLAlchemy as well as on the MySQL database
itself, especially if database reflection features are to be used.
"""
# if the application exists, remove it so we can replace it
(app_exists, ), = appsession.query(exists().where(schema.App_Application.number == obj.application))
if app_exists:
if override:
app_query = appsession.query(schema.App_Application).filter(schema.App_Application.number == obj.application)
appsession.delete(app_query.one())
else:
return
if len(obj.app["number"]) < 3:
return
app = schema.App_Application(**obj.app)
# lots of abstracts seem to be missing. why?
add_all_app_fields(obj, app)
appsession.merge(app)
def add_all_app_fields(obj, app):
add_app_asg(obj, app)
add_app_inv(obj, app)
add_app_classes(obj, app)
add_app_claims(obj, app)
add_app_current_classes(obj, app)
def add_app_asg(obj, app):
for asg, loc in obj.assignee_list:
loc = fixid(loc)
asg = fixid(asg)
asg['organization'] = unescape_html(asg['organization'])
asg = schema.App_RawAssignee(**asg)
loc = schema.App_RawLocation(**loc)
appsession.merge(loc)
asg.rawlocation = loc
app.rawassignees.append(asg)
def add_app_inv(obj, app):
for inv, loc in obj.inventor_list:
loc = fixid(loc)
inv = fixid(inv)
inv = schema.App_RawInventor(**inv)
loc = schema.App_RawLocation(**loc)
appsession.merge(loc)
inv.rawlocation = loc
app.rawinventors.append(inv)
def add_app_classes(obj, app):
for uspc, mc, sc in obj.us_classifications:
uspc = fixid(uspc)
uspc = schema.App_USPC(**uspc)
mc = schema.App_MainClass(**mc)
sc = schema.App_SubClass(**sc)
appsession.merge(mc)
appsession.merge(sc)
uspc.mainclass = mc
uspc.subclass = sc
app.classes.append(uspc)
def add_app_current_classes(obj, app):
for uspc_current, mc, sc in obj.us_classifications:
uspc_current = fixid(uspc_current)
uspc_current = schema.App_USPC_current(**uspc_current)
mc = schema.App_MainClass_current(**mc)
sc = schema.App_SubClass_current(**sc)
appsession.merge(mc)
appsession.merge(sc)
uspc_current.mainclass_current = mc
uspc_current.subclass_current = sc
app.current_classes.append(uspc_current)
def add_app_ipcr(obj, app):
for ipc in obj.ipcr_classifications:
ipc = schema.App_IPCR(**ipc)
app.ipcrs.append(ipc)
def add_app_claims(obj, app):
claims = obj.claims
for claim in claims:
claim = fixid(claim)
claim['text'] = unescape_html(claim['text'])
claim['text'] = _char.sub(_char_unescape,claim['text'])
clm = schema.App_Claim(**claim)
app.claims.append(clm)
def commit_application():
try:
appsession.commit()
except Exception, e:
appsession.rollback()
print str(e)
grantsession = fetch_session(dbtype='grant')
appsession = fetch_session(dbtype='application')
session = grantsession # default for clean and consolidate
| 33.968627
| 122
| 0.655795
|
01b5f39d47070301708153f824dcc458c568b728
| 4,207
|
py
|
Python
|
faster-rcnn.pytorch/lib/datasets/vg_eval.py
|
sadjadasghari/3d-vehicle-tracking
|
af05d52be81db32fc6a21bf60a757ebc46557998
|
[
"BSD-3-Clause"
] | 603
|
2019-05-28T01:53:29.000Z
|
2022-03-22T00:26:07.000Z
|
faster-rcnn.pytorch/lib/datasets/vg_eval.py
|
reinforcementdriving/3d-vehicle-tracking
|
f8433f72a51dd1a7190570e63e9fda4a924a81f0
|
[
"BSD-3-Clause"
] | 45
|
2019-05-29T05:07:20.000Z
|
2022-01-28T04:10:42.000Z
|
faster-rcnn.pytorch/lib/datasets/vg_eval.py
|
reinforcementdriving/3d-vehicle-tracking
|
f8433f72a51dd1a7190570e63e9fda4a924a81f0
|
[
"BSD-3-Clause"
] | 149
|
2019-05-28T06:53:12.000Z
|
2022-03-24T19:11:58.000Z
|
from __future__ import absolute_import
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
import numpy as np
from .voc_eval import voc_ap
def vg_eval(detpath,
gt_roidb,
image_index,
classindex,
ovthresh=0.5,
use_07_metric=False,
eval_attributes=False):
"""rec, prec, ap, sorted_scores, npos = voc_eval(
detpath,
gt_roidb,
image_index,
classindex,
[ovthresh],
[use_07_metric])
Top level function that does the Visual Genome evaluation.
detpath: Path to detections
gt_roidb: List of ground truth structs.
image_index: List of image ids.
classindex: Category index
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# extract gt objects for this class
class_recs = {}
npos = 0
for item, imagename in zip(gt_roidb, image_index):
if eval_attributes:
bbox = item['boxes'][np.where(
np.any(item['gt_attributes'].toarray() == classindex, axis=1))[
0], :]
else:
bbox = item['boxes'][np.where(item['gt_classes'] == classindex)[0],
:]
difficult = np.zeros((bbox.shape[0],)).astype(np.bool)
det = [False] * bbox.shape[0]
npos = npos + sum(~difficult)
class_recs[str(imagename)] = {'bbox': bbox,
'difficult': difficult,
'det': det}
if npos == 0:
# No ground truth examples
return 0, 0, 0, 0, npos
# read dets
with open(detpath, 'r') as f:
lines = f.readlines()
if len(lines) == 0:
# No detection examples
return 0, 0, 0, 0, npos
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = -np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
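    # Greedy matching: detections are processed in descending confidence order,
    # and each ground-truth box can be claimed by at most one detection
    # (R['det'] marks ground truths that have already been matched).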
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap, sorted_scores, npos
| 33.388889
| 79
| 0.487758
|
39609cfa1b5b828e6e549c4acdf99456b70281cb
| 21,580
|
py
|
Python
|
bot_project/buzzbot/stripAndConvertHTMLTags.py
|
pbarton666/buzz_bot
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
[
"MIT"
] | null | null | null |
bot_project/buzzbot/stripAndConvertHTMLTags.py
|
pbarton666/buzz_bot
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
[
"MIT"
] | null | null | null |
bot_project/buzzbot/stripAndConvertHTMLTags.py
|
pbarton666/buzz_bot
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
[
"MIT"
] | null | null | null |
#scrubHTML.py
#Produces a hash table to convert strange HTML tags and unicode symbols to
#something more readable. This can be maintained manually or built automatically
#from Pat's spreadsheet (hashBash1.xls).
def makeHashTable():
myTable = []
newEntry = ('&', '&')
myTable.append(newEntry)
newEntry = (' ', ' ')
myTable.append(newEntry)
newEntry = (' ', ' ')
myTable.append(newEntry)
newEntry = (' ', ' ')
myTable.append(newEntry)
newEntry = ('¡', ' ')
myTable.append(newEntry)
newEntry = ('¡', ' ')
myTable.append(newEntry)
newEntry = ('¡', ' ')
myTable.append(newEntry)
newEntry = ('¢', '(cents)')
myTable.append(newEntry)
newEntry = ('¢', '(cents)')
myTable.append(newEntry)
newEntry = ('¢', '(cents)')
myTable.append(newEntry)
newEntry = ('£', '(pounds)')
myTable.append(newEntry)
newEntry = ('£', '(pounds)')
myTable.append(newEntry)
newEntry = ('£', '(pounds)')
myTable.append(newEntry)
newEntry = ('¤', '')
myTable.append(newEntry)
newEntry = ('¤', '')
myTable.append(newEntry)
newEntry = ('¤', '')
myTable.append(newEntry)
newEntry = ('¥', '(yen)')
myTable.append(newEntry)
newEntry = ('¥', '(yen)')
myTable.append(newEntry)
newEntry = ('¥', '(yen)')
myTable.append(newEntry)
newEntry = ('¦', '')
myTable.append(newEntry)
newEntry = ('¦', '')
myTable.append(newEntry)
newEntry = ('¦', '')
myTable.append(newEntry)
newEntry = ('—', '-')
myTable.append(newEntry)
newEntry = ('§', '(section)')
myTable.append(newEntry)
newEntry = ('§', '(section)')
myTable.append(newEntry)
newEntry = ('§', '(section)')
myTable.append(newEntry)
newEntry = ('¨', '')
myTable.append(newEntry)
newEntry = ('¨', '')
myTable.append(newEntry)
newEntry = ('¨', '')
myTable.append(newEntry)
newEntry = ('©', '(c)')
myTable.append(newEntry)
newEntry = ('©', '(c)')
myTable.append(newEntry)
newEntry = ('©', '(c)')
myTable.append(newEntry)
newEntry = ('ª', 'th')
myTable.append(newEntry)
newEntry = ('ª', 'th')
myTable.append(newEntry)
newEntry = ('ª', 'th')
myTable.append(newEntry)
newEntry = ('«', '`')
myTable.append(newEntry)
newEntry = ('«', '`')
myTable.append(newEntry)
newEntry = ('«', '`')
myTable.append(newEntry)
newEntry = ('¬', '(not)')
myTable.append(newEntry)
newEntry = ('¬', '(not)')
myTable.append(newEntry)
newEntry = ('¬', '(not)')
myTable.append(newEntry)
newEntry = ('­', '-')
myTable.append(newEntry)
newEntry = ('­', '-')
myTable.append(newEntry)
newEntry = ('­', '-')
myTable.append(newEntry)
newEntry = ('®', '(r)')
myTable.append(newEntry)
newEntry = ('®', '(r)')
myTable.append(newEntry)
newEntry = ('®', '(r)')
myTable.append(newEntry)
newEntry = ('¯', '')
myTable.append(newEntry)
newEntry = ('¯', '')
myTable.append(newEntry)
newEntry = ('¯', '')
myTable.append(newEntry)
newEntry = ('°', '(degrees)')
myTable.append(newEntry)
newEntry = ('°', '(degrees)')
myTable.append(newEntry)
newEntry = ('°', '(degrees)')
myTable.append(newEntry)
newEntry = ('±', '(plus or minus)')
myTable.append(newEntry)
newEntry = ('±', '(plus or minus)')
myTable.append(newEntry)
newEntry = ('±', '(plus or minus)')
myTable.append(newEntry)
newEntry = ('²', '^2')
myTable.append(newEntry)
newEntry = ('²', '^2')
myTable.append(newEntry)
newEntry = ('²', '^2')
myTable.append(newEntry)
newEntry = ('³', '^3')
myTable.append(newEntry)
newEntry = ('³', '^3')
myTable.append(newEntry)
newEntry = ('³', '^3')
myTable.append(newEntry)
newEntry = ('´', '`')
myTable.append(newEntry)
newEntry = ('´', '`')
myTable.append(newEntry)
newEntry = ('´', '`')
myTable.append(newEntry)
newEntry = ('µ', 'mu')
myTable.append(newEntry)
newEntry = ('µ', 'mu')
myTable.append(newEntry)
newEntry = ('µ', 'mu')
myTable.append(newEntry)
newEntry = ('¶', '')
myTable.append(newEntry)
newEntry = ('¶', '')
myTable.append(newEntry)
newEntry = ('¶', '')
myTable.append(newEntry)
newEntry = ('·', '')
myTable.append(newEntry)
newEntry = ('·', '')
myTable.append(newEntry)
newEntry = ('·', '')
myTable.append(newEntry)
newEntry = ('¸', '')
myTable.append(newEntry)
newEntry = ('¸', '')
myTable.append(newEntry)
newEntry = ('¸', '')
myTable.append(newEntry)
newEntry = ('¹', '')
myTable.append(newEntry)
newEntry = ('¹', '')
myTable.append(newEntry)
newEntry = ('¹', '')
myTable.append(newEntry)
newEntry = ('º', 'th')
myTable.append(newEntry)
newEntry = ('º', 'th')
myTable.append(newEntry)
newEntry = ('º', 'th')
myTable.append(newEntry)
newEntry = ('»', '')
myTable.append(newEntry)
newEntry = ('»', '')
myTable.append(newEntry)
newEntry = ('»', '')
myTable.append(newEntry)
newEntry = ('¼', '\1/4')
myTable.append(newEntry)
newEntry = ('¼', '\1/4')
myTable.append(newEntry)
newEntry = ('¼', '\1/4')
myTable.append(newEntry)
newEntry = ('½', '\1/2')
myTable.append(newEntry)
newEntry = ('½', '\1/2')
myTable.append(newEntry)
newEntry = ('½', '\1/2')
myTable.append(newEntry)
newEntry = ('¾', '\3/4')
myTable.append(newEntry)
newEntry = ('¾', '\3/4')
myTable.append(newEntry)
newEntry = ('¾', '\3/4')
myTable.append(newEntry)
newEntry = ('¿', '')
myTable.append(newEntry)
newEntry = ('¿', '')
myTable.append(newEntry)
newEntry = ('¿', '')
myTable.append(newEntry)
newEntry = ('À', 'A')
myTable.append(newEntry)
newEntry = ('À', 'A')
myTable.append(newEntry)
newEntry = ('À', 'A')
myTable.append(newEntry)
newEntry = ('Á', 'A')
myTable.append(newEntry)
newEntry = ('Á', 'A')
myTable.append(newEntry)
newEntry = ('Á', 'A')
myTable.append(newEntry)
newEntry = ('Â', 'A')
myTable.append(newEntry)
newEntry = ('Â', 'A')
myTable.append(newEntry)
newEntry = ('Â', 'A')
myTable.append(newEntry)
newEntry = ('Ã', 'A')
myTable.append(newEntry)
newEntry = ('Ã', 'A')
myTable.append(newEntry)
newEntry = ('Ã', 'A')
myTable.append(newEntry)
newEntry = ('Ä', 'A')
myTable.append(newEntry)
newEntry = ('Ä', 'A')
myTable.append(newEntry)
newEntry = ('Ä', 'A')
myTable.append(newEntry)
newEntry = ('Å', 'A')
myTable.append(newEntry)
newEntry = ('Å', 'A')
myTable.append(newEntry)
newEntry = ('Å', 'A')
myTable.append(newEntry)
newEntry = ('Æ', 'AE')
myTable.append(newEntry)
newEntry = ('Æ', 'AE')
myTable.append(newEntry)
newEntry = ('Æ', 'AE')
myTable.append(newEntry)
newEntry = ('Ç', 'C')
myTable.append(newEntry)
newEntry = ('Ç', 'C')
myTable.append(newEntry)
newEntry = ('Ç', 'C')
myTable.append(newEntry)
newEntry = ('È', 'E')
myTable.append(newEntry)
newEntry = ('È', 'E')
myTable.append(newEntry)
newEntry = ('È', 'E')
myTable.append(newEntry)
newEntry = ('É', 'E')
myTable.append(newEntry)
newEntry = ('É', 'E')
myTable.append(newEntry)
newEntry = ('É', 'E')
myTable.append(newEntry)
newEntry = ('Ê', 'E')
myTable.append(newEntry)
newEntry = ('Ê', 'E')
myTable.append(newEntry)
newEntry = ('Ê', 'E')
myTable.append(newEntry)
newEntry = ('Ë', 'E')
myTable.append(newEntry)
newEntry = ('Ë', 'E')
myTable.append(newEntry)
newEntry = ('Ë', 'E')
myTable.append(newEntry)
newEntry = ('Ì', 'I')
myTable.append(newEntry)
newEntry = ('Ì', 'I')
myTable.append(newEntry)
newEntry = ('Ì', 'I')
myTable.append(newEntry)
newEntry = ('Í', 'I')
myTable.append(newEntry)
newEntry = ('Í', 'I')
myTable.append(newEntry)
newEntry = ('Í', 'I')
myTable.append(newEntry)
newEntry = ('Î', 'I')
myTable.append(newEntry)
newEntry = ('Î', 'I')
myTable.append(newEntry)
newEntry = ('Î', 'I')
myTable.append(newEntry)
newEntry = ('Ï', 'I')
myTable.append(newEntry)
newEntry = ('Ï', 'I')
myTable.append(newEntry)
newEntry = ('Ï', 'I')
myTable.append(newEntry)
newEntry = ('Ð', '')
myTable.append(newEntry)
newEntry = ('Ð', '')
myTable.append(newEntry)
newEntry = ('Ð', '')
myTable.append(newEntry)
newEntry = ('Ñ', 'N')
myTable.append(newEntry)
newEntry = ('Ñ', 'N')
myTable.append(newEntry)
newEntry = ('Ñ', 'N')
myTable.append(newEntry)
newEntry = ('Ò', 'O')
myTable.append(newEntry)
newEntry = ('Ò', 'O')
myTable.append(newEntry)
newEntry = ('Ò', 'O')
myTable.append(newEntry)
newEntry = ('Ó', 'O')
myTable.append(newEntry)
newEntry = ('Ó', 'O')
myTable.append(newEntry)
newEntry = ('Ó', 'O')
myTable.append(newEntry)
newEntry = ('Ô', 'O')
myTable.append(newEntry)
newEntry = ('Ô', 'O')
myTable.append(newEntry)
newEntry = ('Ô', 'O')
myTable.append(newEntry)
newEntry = ('Õ', 'O')
myTable.append(newEntry)
newEntry = ('Õ', 'O')
myTable.append(newEntry)
newEntry = ('Õ', 'O')
myTable.append(newEntry)
newEntry = ('Ö', 'O')
myTable.append(newEntry)
newEntry = ('Ö', 'O')
myTable.append(newEntry)
newEntry = ('Ö', 'O')
myTable.append(newEntry)
newEntry = ('×', '*')
myTable.append(newEntry)
newEntry = ('×', '*')
myTable.append(newEntry)
newEntry = ('×', '*')
myTable.append(newEntry)
newEntry = ('Ø', 'O')
myTable.append(newEntry)
newEntry = ('Ø', 'O')
myTable.append(newEntry)
newEntry = ('Ø', 'O')
myTable.append(newEntry)
newEntry = ('Ù', 'U')
myTable.append(newEntry)
newEntry = ('Ù', 'U')
myTable.append(newEntry)
newEntry = ('Ù', 'U')
myTable.append(newEntry)
newEntry = ('Ú', 'U')
myTable.append(newEntry)
newEntry = ('Ú', 'U')
myTable.append(newEntry)
newEntry = ('Ú', 'U')
myTable.append(newEntry)
newEntry = ('Û', 'U')
myTable.append(newEntry)
newEntry = ('Û', 'U')
myTable.append(newEntry)
newEntry = ('Û', 'U')
myTable.append(newEntry)
newEntry = ('Ü', 'U')
myTable.append(newEntry)
newEntry = ('Ü', 'U')
myTable.append(newEntry)
newEntry = ('Ü', 'U')
myTable.append(newEntry)
newEntry = ('Ý', 'Y')
myTable.append(newEntry)
newEntry = ('Ý', 'Y')
myTable.append(newEntry)
newEntry = ('Ý', 'Y')
myTable.append(newEntry)
newEntry = ('Þ', '')
myTable.append(newEntry)
newEntry = ('Þ', '')
myTable.append(newEntry)
newEntry = ('Þ', '')
myTable.append(newEntry)
newEntry = ('ß', '')
myTable.append(newEntry)
newEntry = ('ß', '')
myTable.append(newEntry)
newEntry = ('ß', '')
myTable.append(newEntry)
newEntry = ('à', 'a')
myTable.append(newEntry)
newEntry = ('à', 'a')
myTable.append(newEntry)
newEntry = ('à', 'a')
myTable.append(newEntry)
newEntry = ('á', 'a')
myTable.append(newEntry)
newEntry = ('á', 'a')
myTable.append(newEntry)
newEntry = ('á', 'a')
myTable.append(newEntry)
newEntry = ('â', 'a')
myTable.append(newEntry)
newEntry = ('â', 'a')
myTable.append(newEntry)
newEntry = ('â', 'a')
myTable.append(newEntry)
newEntry = ('ã', 'a')
myTable.append(newEntry)
newEntry = ('ã', 'a')
myTable.append(newEntry)
newEntry = ('ã', 'a')
myTable.append(newEntry)
newEntry = ('ä', 'a')
myTable.append(newEntry)
newEntry = ('ä', 'a')
myTable.append(newEntry)
newEntry = ('ä', 'a')
myTable.append(newEntry)
newEntry = ('å', 'a')
myTable.append(newEntry)
newEntry = ('å', 'a')
myTable.append(newEntry)
newEntry = ('å', 'a')
myTable.append(newEntry)
newEntry = ('æ', 'ae')
myTable.append(newEntry)
newEntry = ('æ', 'ae')
myTable.append(newEntry)
newEntry = ('æ', 'ae')
myTable.append(newEntry)
newEntry = ('ç', 'c')
myTable.append(newEntry)
newEntry = ('ç', 'c')
myTable.append(newEntry)
newEntry = ('ç', 'c')
myTable.append(newEntry)
newEntry = ('è', 'e')
myTable.append(newEntry)
newEntry = ('è', 'e')
myTable.append(newEntry)
newEntry = ('è', 'e')
myTable.append(newEntry)
newEntry = ('é', 'e')
myTable.append(newEntry)
newEntry = ('é', 'e')
myTable.append(newEntry)
newEntry = ('é', 'e')
myTable.append(newEntry)
newEntry = ('ê', 'e')
myTable.append(newEntry)
newEntry = ('ê', 'e')
myTable.append(newEntry)
newEntry = ('ê', 'e')
myTable.append(newEntry)
newEntry = ('ë', 'e')
myTable.append(newEntry)
newEntry = ('ë', 'e')
myTable.append(newEntry)
newEntry = ('ë', 'e')
myTable.append(newEntry)
newEntry = ('ì', 'i')
myTable.append(newEntry)
newEntry = ('ì', 'i')
myTable.append(newEntry)
newEntry = ('ì', 'i')
myTable.append(newEntry)
newEntry = ('í', 'i')
myTable.append(newEntry)
newEntry = ('í', 'i')
myTable.append(newEntry)
newEntry = ('í', 'i')
myTable.append(newEntry)
newEntry = ('î', 'i')
myTable.append(newEntry)
newEntry = ('î', 'i')
myTable.append(newEntry)
newEntry = ('î', 'i')
myTable.append(newEntry)
newEntry = ('ï', 'i')
myTable.append(newEntry)
newEntry = ('ï', 'i')
myTable.append(newEntry)
newEntry = ('ï', 'i')
myTable.append(newEntry)
newEntry = ('ð', '')
myTable.append(newEntry)
newEntry = ('ð', '')
myTable.append(newEntry)
newEntry = ('ð', '')
myTable.append(newEntry)
newEntry = ('ñ', 'n')
myTable.append(newEntry)
newEntry = ('ñ', 'n')
myTable.append(newEntry)
newEntry = ('ñ', 'n')
myTable.append(newEntry)
newEntry = ('ò', 'o')
myTable.append(newEntry)
newEntry = ('ò', 'o')
myTable.append(newEntry)
newEntry = ('ò', 'o')
myTable.append(newEntry)
newEntry = ('ó', 'o')
myTable.append(newEntry)
newEntry = ('ó', 'o')
myTable.append(newEntry)
newEntry = ('ó', 'o')
myTable.append(newEntry)
newEntry = ('ô', 'o')
myTable.append(newEntry)
newEntry = ('ô', 'o')
myTable.append(newEntry)
newEntry = ('ô', 'o')
myTable.append(newEntry)
newEntry = ('õ', 'o')
myTable.append(newEntry)
newEntry = ('õ', 'o')
myTable.append(newEntry)
newEntry = ('õ', 'o')
myTable.append(newEntry)
newEntry = ('ö', 'o')
myTable.append(newEntry)
newEntry = ('ö', 'o')
myTable.append(newEntry)
newEntry = ('ö', 'o')
myTable.append(newEntry)
newEntry = ('÷', '/')
myTable.append(newEntry)
newEntry = ('÷', '/')
myTable.append(newEntry)
newEntry = ('÷', '/')
myTable.append(newEntry)
newEntry = ('ø', 'o')
myTable.append(newEntry)
newEntry = ('ø', 'o')
myTable.append(newEntry)
newEntry = ('ø', 'o')
myTable.append(newEntry)
newEntry = ('ù', 'u')
myTable.append(newEntry)
newEntry = ('ù', 'u')
myTable.append(newEntry)
newEntry = ('ù', 'u')
myTable.append(newEntry)
newEntry = ('ú', 'u')
myTable.append(newEntry)
newEntry = ('ú', 'u')
myTable.append(newEntry)
newEntry = ('ú', 'u')
myTable.append(newEntry)
newEntry = ('û', 'u')
myTable.append(newEntry)
newEntry = ('û', 'u')
myTable.append(newEntry)
newEntry = ('û', 'u')
myTable.append(newEntry)
newEntry = ('ü', 'u')
myTable.append(newEntry)
newEntry = ('ü', 'u')
myTable.append(newEntry)
newEntry = ('ü', 'u')
myTable.append(newEntry)
newEntry = ('ý', 'y')
myTable.append(newEntry)
newEntry = ('ý', 'y')
myTable.append(newEntry)
newEntry = ('ý', 'y')
myTable.append(newEntry)
newEntry = ('þ', '')
myTable.append(newEntry)
newEntry = ('þ', '')
myTable.append(newEntry)
newEntry = ('þ', '')
myTable.append(newEntry)
newEntry = ('ÿ', 'y')
myTable.append(newEntry)
newEntry = ('ÿ', 'y')
myTable.append(newEntry)
newEntry = ('ÿ', 'y')
myTable.append(newEntry)
newEntry = ('‚', '\'')
myTable.append(newEntry)
newEntry = ('ƒ', 'f')
myTable.append(newEntry)
newEntry = ('„', '\"')
myTable.append(newEntry)
newEntry = ('…', '.')
myTable.append(newEntry)
newEntry = ('†', '')
myTable.append(newEntry)
newEntry = ('‡', '')
myTable.append(newEntry)
newEntry = ('ˆ', '')
myTable.append(newEntry)
newEntry = ('‰', '%')
myTable.append(newEntry)
newEntry = ('Š', 'S')
myTable.append(newEntry)
newEntry = ('‹', '`')
myTable.append(newEntry)
newEntry = ('Œ', '')
myTable.append(newEntry)
newEntry = ('‘', '`')
myTable.append(newEntry)
newEntry = ('’', '`')
myTable.append(newEntry)
newEntry = ('“', '`')
myTable.append(newEntry)
newEntry = ('”', '`')
myTable.append(newEntry)
newEntry = ('•', '-')
myTable.append(newEntry)
newEntry = ('–', '-')
myTable.append(newEntry)
newEntry = ('—', '-')
myTable.append(newEntry)
newEntry = ('˜', '`')
myTable.append(newEntry)
newEntry = ('™', '(TM)')
myTable.append(newEntry)
newEntry = ('š', 's')
myTable.append(newEntry)
newEntry = ('›', '`')
myTable.append(newEntry)
newEntry = ('œ', '')
myTable.append(newEntry)
newEntry = ('Ÿ', 'Y')
myTable.append(newEntry)
newEntry = ('\xc3\xa2\xe2\x82\xac\xc5\x93', '`')
myTable.append(newEntry)
newEntry = ('\xc3\xa2\xe2\x82\xac\xc2\x9d', '`')
myTable.append(newEntry)
newEntry = ('\xc3\xa2\xe2\x82\xac\xe2\x80\x9c', '..')
myTable.append(newEntry)
newEntry = ('20%', ' ')
myTable.append(newEntry)
newEntry = ('23%', '#')
myTable.append(newEntry)
newEntry = ('24%', '$')
myTable.append(newEntry)
newEntry = ('25%', '%')
myTable.append(newEntry)
newEntry = ('26%', '&')
myTable.append(newEntry)
newEntry = ('%2F', '/')
myTable.append(newEntry)
newEntry = ('%3A', ':')
myTable.append(newEntry)
newEntry = ('%3B', ';')
myTable.append(newEntry)
newEntry = ('%3C', '<')
myTable.append(newEntry)
newEntry = ('%3D', '=')
myTable.append(newEntry)
newEntry = ('%3E', '>')
myTable.append(newEntry)
newEntry = ('%3F', '?')
myTable.append(newEntry)
newEntry = ('40%', '@')
myTable.append(newEntry)
newEntry = ('%5B', '[')
myTable.append(newEntry)
newEntry = ('%5C', '\\')
myTable.append(newEntry)
newEntry = ('%5D', ']')
myTable.append(newEntry)
newEntry = ('%5E', '^')
myTable.append(newEntry)
newEntry = ('60%', '`')
myTable.append(newEntry)
newEntry = ('%7B', '{')
myTable.append(newEntry)
newEntry = ('%7C', '|')
myTable.append(newEntry)
newEntry = ('%7D', '}')
myTable.append(newEntry)
newEntry = ('%7E', '~')
myTable.append(newEntry)
return myTable
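#Minimal usage sketch (an addition, not part of the original module): apply the
#table with plain str.replace, longest source strings first so multi-character
#sequences are handled before their shorter substrings. The helper name
#scrubText is hypothetical.
def scrubText(text, table=None):
    table = makeHashTable() if table is None else table
    for bad, good in sorted(table, key=lambda pair: len(pair[0]), reverse=True):
        text = text.replace(bad, good)
    return text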
| 31.457726
| 83
| 0.540361
|
edfde695c572192cfdfee3a00d37e1791ca048bf
| 4,127
|
py
|
Python
|
src/training/experiment_hard_label.py
|
XieResearchGroup/PLANS
|
479e97f5944dcc036d5f4204890a371ebafb394a
|
[
"MIT"
] | null | null | null |
src/training/experiment_hard_label.py
|
XieResearchGroup/PLANS
|
479e97f5944dcc036d5f4204890a371ebafb394a
|
[
"MIT"
] | null | null | null |
src/training/experiment_hard_label.py
|
XieResearchGroup/PLANS
|
479e97f5944dcc036d5f4204890a371ebafb394a
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from functools import partial
import tensorflow as tf
import numpy as np
from ..models.hmlc import HMLC, HMLC_M
from ..data_loaders.cvs_loader import CVSLoader
from ..utils.label_convertors import convert2vec, hierarchical, convert2hier
from ..utils.label_convertors import fill_unlabeled
from .train_model import train_model
from .training_args import TrainingArgs
def main(data_path,
DataLoader=CVSLoader,
columns=["ECFP", "Label"],
learning_rate=0.001,
drop_rate=0.3,
batch_size=128,
epochs=30,
es_patience=5,
log_path="../logs",
comment=None,
if_hard=False,
unlabeled_weight=1.0):
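    # Overall flow (as implemented below): train a teacher HMLC on the labeled
    # split, pseudo-label the unlabeled split with it, then train two HMLC_M
    # students: one on the soft pseudo-labels and one on the hard ones.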
# Data
data_loader = DataLoader(data_path)
x_train, y_train, x_test, y_test = data_loader.load_data(columns)
x_train = convert2vec(x_train, dtype=float)
y_train = convert2hier(y_train, dtype=float)
x_test = convert2vec(x_test, dtype=float)
y_val = convert2hier(y_test, dtype=float) # for validation during training
y_eval = convert2vec(y_test, dtype=int) # for evaluation after training
data_pred = data_loader.load_unlabeled(["ECFP", "Label"])
x_pred = data_pred[:, 0]
x_pred = convert2vec(x_pred, dtype=float)
# Open log
now = datetime.now()
timestamp = now.strftime(r"%Y%m%d_%H%M%S")
log_path = os.path.sep.join(log_path.split("/"))
log_path = os.path.join(log_path, timestamp)
os.makedirs(log_path, exist_ok=True)
log_f_path = os.path.join(log_path, "logs.txt")
log_f = open(log_f_path, "w")
# Set up the train_model function
my_train_model = partial(
train_model,
learning_rate=learning_rate,
        unlabeled_weight=unlabeled_weight,
batch_size=batch_size,
epochs=epochs,
es_patience=es_patience,
log_path=log_path,
log_fh=log_f,
comment=comment
)
# Train model1
# - Initialize model1
model1 = HMLC(drop_rate=drop_rate)
# - Training
log_f.write("Training teacher model: \n")
my_train_model(
model=model1,
x_train=x_train,
y_train=y_train,
x_test=x_test,
y_val=y_val,
y_eval=y_eval)
# - Predict labels for unlabeled data with model1
predictions = model1.predict(x_pred)[:, -5:]
y_pred_soft = fill_unlabeled(
predictions, data_pred[:, 1], hard_label=False)
y_pred_soft = hierarchical(y_pred_soft)
y_pred_hard = fill_unlabeled(predictions, data_pred[:, 1], hard_label=True)
y_pred_hard = hierarchical(y_pred_hard)
# - Combine labeled and soft-labeled unlabeled training data
x_mix = np.concatenate([x_train, x_pred], axis=0)
y_mix = np.concatenate([y_train, y_pred_soft], axis=0)
randomed_idx = np.random.permutation(x_mix.shape[0])
np.take(x_mix, randomed_idx, axis=0, out=x_mix)
np.take(y_mix, randomed_idx, axis=0, out=y_mix)
# Train model2 with soft labels
tf.keras.backend.clear_session()
log_f.write("Train HMLC_M with soft labels: \n")
model2 = HMLC_M(drop_rate=drop_rate)
# - Training
my_train_model(
model=model2,
x_train=x_mix,
y_train=y_mix,
x_test=x_test,
y_val=y_val,
y_eval=y_eval)
# - Combine labeled and hard-labeled unlabeled training data
x_mix = np.concatenate([x_train, x_pred], axis=0)
y_mix = np.concatenate([y_train, y_pred_hard], axis=0)
randomed_idx = np.random.permutation(x_mix.shape[0])
np.take(x_mix, randomed_idx, axis=0, out=x_mix)
np.take(y_mix, randomed_idx, axis=0, out=y_mix)
# Train model3 with hard labels
tf.keras.backend.clear_session()
log_f.write("Train HMLC_M model with hard labels:\n")
model3 = HMLC_M(drop_rate=drop_rate)
# - Training
my_train_model(
model=model3,
x_train=x_mix,
y_train=y_mix,
x_test=x_test,
y_val=y_val,
y_eval=y_eval)
log_f.write("#"*40 + "\n")
log_f.close()
if __name__ == "__main__":
parser = TrainingArgs()
args = parser.parse_args()
main(**vars(args))
| 30.57037
| 79
| 0.66489
|
20a46dbde289013ba2a5ed040a5fff392045299c
| 1,354
|
py
|
Python
|
contrib_src/inference.py
|
modelhub-ai/sfm-learner-pose
|
ea49439ae713d555a87dd55d9d1f07f7461ab1f8
|
[
"MIT"
] | null | null | null |
contrib_src/inference.py
|
modelhub-ai/sfm-learner-pose
|
ea49439ae713d555a87dd55d9d1f07f7461ab1f8
|
[
"MIT"
] | null | null | null |
contrib_src/inference.py
|
modelhub-ai/sfm-learner-pose
|
ea49439ae713d555a87dd55d9d1f07f7461ab1f8
|
[
"MIT"
] | null | null | null |
#import onnx
import tensorflow as tf
import json
from processing import ImageProcessor
from modelhublib.model import ModelBase
import PIL
import numpy as np
from tensorflow.python.saved_model import tag_constants
class Model(ModelBase):
def __init__(self):
# load config file
config = json.load(open("model/config.json"))
# get the image processor
self._imageProcessor = ImageProcessor(config)
# load the DL model (change this if you are not using ONNX)
#self._model = onnx.load('model/model.onnx')
#self._model = sfm
def infer(self, input):
inputAsNpArr = self._imageProcessor.loadAndPreprocess(input)
# load preprocessed input
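        # The SavedModel under model/ is loaded lazily on each call; the input
        # and output tensors are looked up by name rather than via a stored
        # model object.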
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as sess:
tf.saved_model.loader.load(
sess,
[tag_constants.SERVING],
'model/',
)
input_uint8 = graph.get_tensor_by_name('raw_input:0')
prediction = graph.get_tensor_by_name('pose_prediction/pose_exp_net/pose/mul:0')
pred = sess.run(prediction, feed_dict={input_uint8: inputAsNpArr[None,:,:,:]})
output = self._imageProcessor.computeOutput(pred)
return output
| 33.02439
| 96
| 0.633678
|